def test_log_it():
    log = mock.Mock(spec=("debug",))
    utils.logging_debug(log, "hello dad! {} {where}", "I'm", where="in jail")
    log.debug.assert_called_once_with("hello dad! I'm in jail")
@tasklets.tasklet
def _datastore_run_query(query):
    """Run a query in Datastore.

    Args:
        query (query.QueryOptions): The query spec.

    Returns:
        tasklets.Future: Eventual result will be the ``RunQuery`` response
            from Datastore.
    """
    query_pb = _query_to_protobuf(query)
    partition_id = entity_pb2.PartitionId(
        project_id=query.project, namespace_id=query.namespace
    )
    read_options = _datastore_api.get_read_options(
        query, default_read_consistency=_datastore_api.EVENTUAL
    )
    request = datastore_pb2.RunQueryRequest(
        project_id=query.project,
        partition_id=partition_id,
        query=query_pb,
        read_options=read_options,
    )
    response = yield _datastore_api.make_call(
        "RunQuery", request, timeout=query.timeout
    )
    utils.logging_debug(log, response)
    raise tasklets.Return(response)
def test_noop():
    log = mock.Mock(spec=("debug",))
    utils.logging_debug(log, "hello dad! {} {where}", "I'm", where="in jail")
    log.debug.assert_not_called()
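The two `logging_debug` tests above exercise exactly the same call; presumably `test_log_it` runs with NDB's debug flag enabled and `test_noop` with it disabled. A minimal sketch of a `logging_debug` consistent with both tests follows. The module-level `DEBUG` gate is an assumption for illustration, not the library's confirmed implementation:

# Hypothetical sketch, not the actual google.cloud.ndb implementation.
DEBUG = False  # assumed module-level switch toggled between the two tests


def logging_debug(log, message, *args, **kwargs):
    """Log a debug message only when debug logging is enabled.

    When disabled, both the format call and the log call are skipped, so
    expensive formatting is never paid for in production.
    """
    if DEBUG:  # assumed gate; the real condition may differ
        if args or kwargs:
            message = str(message).format(*args, **kwargs)
        log.debug(message)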
@tasklets.tasklet
def _update_key(key, new_value):
    success = False

    while not success:
        old_value = yield _global_get(key)
        utils.logging_debug(log, "old value: {}", old_value)

        value = new_value(old_value)
        utils.logging_debug(log, "new value: {}", value)

        if old_value is not None:
            utils.logging_debug(log, "compare and swap")
            yield _global_watch(key, old_value)
            success = yield _global_compare_and_swap(
                key, value, expires=_LOCK_TIME
            )
        else:
            utils.logging_debug(log, "set if not exists")
            success = yield _global_set_if_not_exists(
                key, value, expires=_LOCK_TIME
            )

        utils.logging_debug(log, "success: {}", success)
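`_update_key` takes a callback that maps the old cached value to a new one and retries via watch/compare-and-swap until the write lands (or via set-if-not-exists when the key is absent). A hedged usage sketch; the helper name and marker below are hypothetical:

@tasklets.tasklet
def _append_marker(key, marker):
    # Hypothetical example: append a bytes marker to whatever is cached,
    # relying on _update_key's compare-and-swap loop to retry on contention.
    def new_value(old_value):
        if old_value is None:
            return marker
        return old_value + marker

    yield _update_key(key, new_value)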
@tasklets.tasklet
def global_unlock_for_write(key, lock):
    """Remove a lock for a key by updating or removing a lock value.

    The lock represented by the ``lock`` argument will be released.

    Args:
        key (bytes): The key to unlock.
        lock (bytes): The return value from the call to
            :func:`global_lock_for_write` which acquired the lock.

    Returns:
        tasklets.Future: Eventual result will be :data:`None`.
    """
    utils.logging_debug(log, "unlock for write: {}", lock)

    def new_value(old_value):
        assert lock in old_value, "attempt to remove lock that isn't present"
        value = old_value.replace(lock, b"")
        if value == _LOCKED_FOR_WRITE:
            value = b""
        return value

    cache = _global_cache()
    try:
        yield _update_key(key, new_value)
    except cache.transient_errors:
        # Worst case scenario, lock sticks around for longer than we'd like
        pass
@tasklets.tasklet
def global_lock_for_write(key):
    """Lock a key for a write (put) operation by setting or updating a
    special lock value.

    There can be multiple write locks for a given key. The key will only be
    released when all write locks have been released.

    Args:
        key (bytes): The key to lock.

    Returns:
        tasklets.Future: Eventual result will be a lock value to be used
            later with :func:`global_unlock_for_write`.
    """
    lock = "." + str(uuid.uuid4())
    lock = lock.encode("ascii")
    utils.logging_debug(log, "lock for write: {}", lock)

    def new_value(old_value):
        if old_value and old_value.startswith(_LOCKED_FOR_WRITE):
            return old_value + lock

        return _LOCKED_FOR_WRITE + lock

    yield _update_key(key, new_value)
    raise tasklets.Return(lock)
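The resulting lock value layout: each writer appends a dot-prefixed UUID token to the shared `_LOCKED_FOR_WRITE` prefix, so concurrent writers stack their tokens on a single cache value, and `global_unlock_for_write` strips tokens back out until only the bare prefix remains. A small illustration of `new_value` applied twice; the literal sentinel value below is assumed for the example:

# Illustration only; assume the sentinel prefix is some fixed bytes value.
_LOCKED_FOR_WRITE = b"locked_for_write"

lock_a = b".aaaaaaaa-0000-0000-0000-000000000000"
lock_b = b".bbbbbbbb-0000-0000-0000-000000000000"

# First writer: no previous lock value, so the sentinel prefix is created.
value = _LOCKED_FOR_WRITE + lock_a

# Second writer: the prefix already exists, so its token is appended.
value = value + lock_b

# value == b"locked_for_write.aaaaaaaa-....bbbbbbbb-..."
# The key is fully unlocked only after both tokens are removed again,
# at which point the bare prefix is replaced with b"".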
@tasklets.tasklet
def _transaction_async(context, callback, read_only=False):
    # Avoid circular import in Python 2.7
    from google.cloud.ndb import _datastore_api

    # Start the transaction
    utils.logging_debug(log, "Start transaction")
    transaction_id = yield _datastore_api.begin_transaction(read_only, retries=0)
    utils.logging_debug(log, "Transaction Id: {}", transaction_id)

    on_commit_callbacks = []
    tx_context = context.new(
        transaction=transaction_id,
        on_commit_callbacks=on_commit_callbacks,
        batches=None,
        commit_batches=None,
        cache=None,
        # We could just pass `None` here and let the `Context` constructor
        # instantiate a new event loop, but our unit tests inject a subclass
        # of `EventLoop` that makes testing a little easier. This makes sure
        # the new event loop is of the same type as the current one, to
        # propagate the event loop class used for testing.
        eventloop=type(context.eventloop)(),
        retry=context.get_retry_state(),
    )

    # The outer loop is dependent on the inner loop
    def run_inner_loop(inner_context):
        with inner_context.use():
            if inner_context.eventloop.run1():
                return True  # schedule again

    context.eventloop.add_idle(run_inner_loop, tx_context)

    with tx_context.use():
        try:
            # Run the callback
            result = callback()
            if isinstance(result, tasklets.Future):
                result = yield result

            # Make sure we've run everything we can run before calling commit
            _datastore_api.prepare_to_commit(transaction_id)
            tx_context.eventloop.run()

            # Commit the transaction
            yield _datastore_api.commit(transaction_id, retries=0)

        # Rollback if there is an error
        except Exception as e:  # noqa: E722
            tx_context.cache.clear()
            yield _datastore_api.rollback(transaction_id)
            raise e

        tx_context._clear_global_cache()

        for callback in on_commit_callbacks:
            callback()

    raise tasklets.Return(result)
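A hedged usage sketch: run a callback inside a transaction and block on the eventual result. The callback below is hypothetical, and real callers would normally go through the public `transaction`/`transaction_async` wrappers rather than this internal helper; the blocking `result()` call assumes the usual `tasklets.Future` API, which drives the event loop while waiting:

def save_entity():
    # Hypothetical transactional work; may return a plain value or a Future.
    return "done"


# Assuming `context` is the current NDB context object:
future = _transaction_async(context, save_entity)
result = future.result()  # blocks until commit (or rollback) completes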
def lookup_callback(self, rpc):
    """Process the results of a call to Datastore Lookup.

    Each key in the batch will be in one of `found`, `missing`, or
    `deferred`. `found` keys have their futures' results set with the
    protocol buffers for their entities. `missing` keys have their futures'
    results set with `_NOT_FOUND`, a sentinel value. `deferred` keys are
    loaded into a new batch so they can be tried again.

    Args:
        rpc (tasklets.Future): If not an exception, the result will be an
            instance of
            :class:`google.cloud.datastore_v1.datastore_pb.LookupResponse`
    """
    # If RPC has resulted in an exception, propagate that exception to all
    # waiting futures.
    exception = rpc.exception()
    if exception is not None:
        for future in itertools.chain(*self.todo.values()):
            future.set_exception(exception)
        return

    # Process results, which are divided into found, missing, and deferred
    results = rpc.result()
    utils.logging_debug(log, results)

    # For all deferred keys, batch them up again with their original
    # futures
    if results.deferred:
        next_batch = _batch.get_batch(type(self), self.options)
        for key in results.deferred:
            todo_key = key.SerializeToString()
            next_batch.todo.setdefault(todo_key, []).extend(self.todo[todo_key])

    # For all missing keys, set result to _NOT_FOUND and let callers decide
    # how to handle
    for result in results.missing:
        todo_key = result.entity.key.SerializeToString()
        for future in self.todo[todo_key]:
            future.set_result(_NOT_FOUND)

    # For all found entities, set the result on their corresponding futures
    for result in results.found:
        entity = result.entity
        todo_key = entity.key.SerializeToString()
        for future in self.todo[todo_key]:
            future.set_result(entity)
@tasklets.tasklet
def global_unlock_for_write(key, lock):
    """Remove a lock for a key by updating or removing a lock value.

    The lock represented by the ``lock`` argument will be released.

    Args:
        key (bytes): The key to unlock.
        lock (bytes): The return value from the call to
            :func:`global_lock_for_write` which acquired the lock.

    Returns:
        tasklets.Future: Eventual result will be :data:`None`.
    """
    utils.logging_debug(log, "unlock for write: {}", lock)

    def new_value(old_value):
        value = old_value

        if value and lock in value:
            value = value.replace(lock, b"")
        else:
            warnings.warn(
                "Attempt to remove a lock that doesn't exist. This is most "
                "likely caused by a long running operation and the lock "
                "timing out.",
                RuntimeWarning,
            )

        if value == _LOCKED_FOR_WRITE:
            value = b""

        if value and not value.startswith(_LOCKED_FOR_WRITE):
            # If this happens, it means the lock expired and something else
            # got written to the cache in the meantime. Whatever value that
            # is, since there was a write operation that is concluding now,
            # we should consider it stale and write a blank value.
            value = b""

        return value

    cache = _global_cache()
    try:
        yield _update_key(key, new_value)
    except cache.transient_errors:
        # Worst case scenario, lock sticks around for longer than we'd like
        pass
@tasklets.tasklet
def rpc_call():
    context = context_module.get_toplevel_context()

    call = method.future(request, timeout=timeout)
    rpc = _remote.RemoteCall(call, rpc_name)
    utils.logging_debug(log, rpc)
    utils.logging_debug(log, "timeout={}", timeout)

    try:
        result = yield rpc
    except Exception as error:
        if isinstance(error, grpc.Call):
            error = core_exceptions.from_grpc_error(error)
        raise error
    finally:
        context.rpc_time += rpc.elapsed_time

    raise tasklets.Return(result)
def _process_commit(rpc, futures):
    """Process the results of a commit request.

    For each mutation, set the result to the key handed back from
    Datastore. If a key wasn't allocated for the mutation, this will be
    :data:`None`.

    Args:
        rpc (tasklets.Tasklet): If not an exception, the result will be an
            instance of
            :class:`google.cloud.datastore_v1.datastore_pb2.CommitResponse`
        futures (List[tasklets.Future]): List of futures waiting on results.
    """
    # If RPC has resulted in an exception, propagate that exception to all
    # waiting futures.
    exception = rpc.exception()
    if exception is not None:
        for future in futures:
            if not future.done():
                future.set_exception(exception)
        return

    # "The i-th mutation result corresponds to the i-th mutation in the
    # request."
    #
    # https://github.com/googleapis/googleapis/blob/master/google/datastore/v1/datastore.proto#L241
    response = rpc.result()
    utils.logging_debug(log, response)

    results_futures = zip(response.mutation_results, futures)
    for mutation_result, future in results_futures:
        if future.done():
            continue

        # Datastore only sends a key if one is allocated for the
        # mutation. Confusingly, though, if a key isn't allocated, instead
        # of getting None, we get a key with an empty path.
        if mutation_result.key.path:
            key = mutation_result.key
        else:
            key = None
        future.set_result(key)
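The empty-path check works because protobuf message fields are never absent, only empty: an unallocated key comes back as a `Key` whose repeated `path` field has zero elements, and an empty repeated field is falsy in Python. A small illustration using the datastore protobuf types named in the snippets; the exact import path varies between library versions, so treat it as an assumption:

# Import path is an assumption; it differs across google-cloud-datastore versions.
from google.cloud.datastore_v1.proto import entity_pb2

key = entity_pb2.Key()  # no path elements: the "no key allocated" shape
assert not key.path     # empty repeated field is falsy

key.path.add(kind="Task", id=42)
assert key.path         # now truthy: a key was allocated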
def run0(self):
    """Run one item (a callback or an RPC wait_any).

    Returns:
        float: A time to sleep if something happened (may be 0);
            None if all queues are empty.
    """
    if self._run_current() or self.run_idle():
        return 0

    delay = None
    if self.queue:
        delay = self.queue[0][0] - time.time()
        if delay <= 0:
            self.inactive = 0
            _, callback, args, kwargs = self.queue.pop(0)
            utils.logging_debug(log, "event: {}", callback.__name__)
            callback(*args, **kwargs)
            return 0

    if self.rpcs:
        # Avoid circular import
        from google.cloud.ndb import context as context_module

        context = context_module.get_toplevel_context()

        # This potentially blocks, waiting for an rpc to finish and put its
        # result on the queue. Functionally equivalent to the ``wait_any``
        # call that was used here in legacy NDB.
        start_time = time.time()
        rpc_id, rpc = self.rpc_results.get()
        elapsed = time.time() - start_time
        utils.logging_debug(log, "Blocked for {}s awaiting RPC results.", elapsed)
        context.wait_time += elapsed

        callback = self.rpcs.pop(rpc_id)
        callback(rpc)
        return 0

    return delay
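The return contract (0 for "ran something, call me again", a positive delay for "sleep, then retry", None for "all queues are empty") is what an outer loop keys off. A sketch of such a driver, assuming only the `run0` contract above; the event loop's actual `run1`/`run` methods may differ in detail:

def run1(self):
    """Run one event, returning True if there may be more work to do."""
    delay = self.run0()
    if delay is None:
        return False  # all queues are empty; nothing left to run
    if delay > 0:
        time.sleep(delay)  # next scheduled event isn't due yet
    return True


def run(self):
    """Run until there is nothing left to do."""
    while self.run1():
        pass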
def run_idle(self):
    """Run one of the idle callbacks.

    Returns:
        bool: Indicates if an idle callback was called.
    """
    if not self.idlers or self.inactive >= len(self.idlers):
        return False

    idler = self.idlers.popleft()
    callback, args, kwargs = idler
    utils.logging_debug(log, "idler: {}", callback.__name__)
    result = callback(*args, **kwargs)

    # See add_idle() for meaning of callback return value.
    if result is None:
        utils.logging_debug(log, "idler {} removed", callback.__name__)
    else:
        if result:
            self.inactive = 0
        else:
            self.inactive += 1
        self.idlers.append(idler)

    return True
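The idler return contract implied above: `None` means "done, remove me", a truthy value means "did work, reset the inactivity counter", and a falsy (non-None) value means "nothing to do, count me toward inactivity" so the loop can stop polling idlers that never produce work. A hypothetical idler illustrating the contract:

def flush_batch_idler(batch):
    # Hypothetical idle callback: drain a pending batch once, then remove
    # itself from the idler deque by returning None.
    if batch:
        batch.flush()
        return None  # work is done; do not reschedule this idler
    return False     # nothing to do this pass; counts as inactive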
def test_logging_debug():
    with pytest.raises(NotImplementedError):
        utils.logging_debug()
def clear(self):
    """Remove all pending events without running any."""
    while self.current or self.idlers or self.queue or self.rpcs:
        current = self.current
        idlers = self.idlers
        queue = self.queue
        rpcs = self.rpcs
        utils.logging_debug(log, "Clearing stale EventLoop instance...")
        if current:
            utils.logging_debug(log, "  current = {}", current)
        if idlers:
            utils.logging_debug(log, "  idlers = {}", idlers)
        if queue:
            utils.logging_debug(log, "  queue = {}", queue)
        if rpcs:
            utils.logging_debug(log, "  rpcs = {}", rpcs)
        self.__init__()
        current.clear()
        idlers.clear()
        queue[:] = []
        rpcs.clear()
        utils.logging_debug(log, "Cleared")