Example #1
        def some_tasklet():
            # This tasklet runs in the main loop. In order to get results back
            # from the transaction_async calls, the run_inner_loop idle handler
            # will have to be run.
            yield [
                _transaction.transaction_async(callback),
                _transaction.transaction_async(callback),
            ]

            # Scheduling this sleep call forces the run_inner_loop idle handler
            # to be run again, covering the case where there is no more work to
            # be done in the transaction. (Branch coverage.)
            yield tasklets.sleep(0)

            raise tasklets.Return("I tried, momma.")
Example #2
def fetch(query):
    """Fetch query results.

    Args:
        query (query.QueryOptions): The query spec.

    Returns:
        tasklets.Future: Result is List[Union[model.Model, key.Key]]: The query
            results.
    """
    results = iterate(query)
    entities = []
    while (yield results.has_next_async()):
        entities.append(results.next())

    raise tasklets.Return(entities)
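Since fetch is itself a tasklet, a caller outside the event loop typically blocks on the returned future; a one-line sketch, assuming the standard Future.result() API:

    entities = fetch(query).result()  # runs the event loop until the tasklet finishes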
Example #3
            def retry_wrapper(key, *args, **kwargs):
                sleep_generator = core_retry.exponential_sleep_generator(
                    0.1, 1)
                attempts = 5
                # The pragma is required because the loop never exits normally;
                # it is only ever exited by raising.
                for sleep_time in sleep_generator:  # pragma: NO BRANCH
                    attempts -= 1
                    try:
                        result = yield wrapped(key, *args, **kwargs)
                        raise tasklets.Return(result)
                    except transient_errors:
                        if not attempts:
                            raise

                    yield tasklets.sleep(sleep_time)
Example #4
    def rpc_call():
        context = context_module.get_toplevel_context()

        call = method.future(request, timeout=timeout)
        rpc = _remote.RemoteCall(call, "{}({})".format(rpc_name, request))
        log.debug(rpc)
        log.debug("timeout={}".format(timeout))

        try:
            result = yield rpc
        except Exception as error:
            if isinstance(error, grpc.Call):
                error = core_exceptions.from_grpc_error(error)
            raise error
        finally:
            context.rpc_time += rpc.elapsed_time

        raise tasklets.Return(result)
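The isinstance(error, grpc.Call) check above works because an error raised from a gRPC future is simultaneously an exception and a grpc.Call carrying status details, and google.api_core.exceptions.from_grpc_error maps it onto the standard GoogleAPICallError hierarchy. A sketch of that translation in isolation, where call stands in for any hypothetical gRPC future:

    import grpc
    from google.api_core import exceptions as core_exceptions

    try:
        call.result()  # raises grpc.RpcError on failure
    except grpc.RpcError as rpc_error:
        # Re-raise as a google.api_core exception with code and message.
        raise core_exceptions.from_grpc_error(rpc_error)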
Example #5
    def rpc_call():
        context = context_module.get_toplevel_context()

        call = method.future(request, timeout=timeout)
        rpc = _remote.RemoteCall(call, rpc_name)
        utils.logging_debug(log, rpc)
        utils.logging_debug(log, "timeout={}", timeout)

        try:
            result = yield rpc
        except Exception as error:
            if isinstance(error, grpc.Call):
                error = core_exceptions.from_grpc_error(error)
            raise error
        finally:
            context.rpc_time += rpc.elapsed_time

        raise tasklets.Return(result)
Example #6
def begin_transaction(read_only, retries=None, timeout=None):
    """Start a new transction.

    Args:
        read_only (bool): Whether to start a read-only or read-write
            transaction.
        retries (int): Number of times to potentially retry the call. If
            :data:`None` is passed, will use :data:`_retry._DEFAULT_RETRIES`.
            If :data:`0` is passed, the call is attempted only once.
        timeout (float): Timeout, in seconds, to pass to gRPC call. If
            :data:`None` is passed, will use :data:`_DEFAULT_TIMEOUT`.

    Returns:
        tasklets.Future: Result will be Transaction Id (bytes) of new
            transaction.
    """
    response = yield _datastore_begin_transaction(read_only,
                                                  retries=retries,
                                                  timeout=timeout)
    raise tasklets.Return(response.transaction)
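Like the other tasklets here, begin_transaction is consumed from inside another tasklet; a sketch using the signature shown above:

    transaction_id = yield begin_transaction(read_only=False, timeout=5.0)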
Example #7
def _count_by_skipping(query):
    limit = query.limit
    query = query.copy(projection=["__key__"], order_by=None, limit=1)
    count = 0
    more_results = NOT_FINISHED
    cursor = None

    while more_results != NO_MORE_RESULTS:
        if limit:
            offset = limit - count - 1
        else:
            offset = 10000

        query = query.copy(offset=offset, start_cursor=cursor)
        response = yield _datastore_run_query(query)
        batch = response.batch

        # The Datastore emulator will never set more_results to NO_MORE_RESULTS,
        # so as a workaround, bail out as soon as we neither skip nor retrieve
        # any results.
        new_count = batch.skipped_results + len(batch.entity_results)
        if new_count == 0 and more_results != NOT_FINISHED:
            break

        count += new_count
        if limit and count >= limit:
            break

        # The Datastore emulator won't set end_cursor to something useful if no results
        # are returned, so the workaround is to use skipped_cursor in that case
        if batch.entity_results:
            cursor = Cursor(batch.end_cursor)
        else:
            cursor = Cursor(batch.skipped_cursor)

        more_results = batch.more_results

    raise tasklets.Return(count)
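The trick in _count_by_skipping is that Datastore reports how many results it skipped, so entities can be counted without transferring them: each round trip asks the backend to skip up to offset results and return at most one. A worked sketch of the arithmetic for a hypothetical limit of 1001:

    limit, count = 1001, 0
    offset = limit - count - 1   # ask Datastore to skip 1000 results
    skipped, returned = 1000, 1  # hypothetical batch response
    count += skipped + returned  # count == 1001 >= limit, so the loop exits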
Example #8
def _transaction_async(context, callback, read_only=False):
    # Avoid circular import in Python 2.7
    from google.cloud.ndb import _datastore_api

    # Start the transaction
    log.debug("Start transaction")
    transaction_id = yield _datastore_api.begin_transaction(read_only,
                                                            retries=0)
    log.debug("Transaction Id: {}".format(transaction_id))

    on_commit_callbacks = []
    tx_context = context.new(
        transaction=transaction_id,
        on_commit_callbacks=on_commit_callbacks,
        cache=None,  # Use new, empty cache for transaction
    )
    with tx_context.use():
        try:
            # Run the callback
            result = callback()
            if isinstance(result, tasklets.Future):
                result = yield result

            # Commit the transaction
            yield _datastore_api.commit(transaction_id, retries=0)

        # Rollback if there is an error
        except Exception as e:
            tx_context.cache.clear()
            yield _datastore_api.rollback(transaction_id)
            raise e

        tx_context._clear_global_cache()
        for callback in on_commit_callbacks:
            callback()

        raise tasklets.Return(result)
Example #9
def _datastore_run_query(query):
    """Run a query in Datastore.

    Args:
        query (query.QueryOptions): The query spec.

    Returns:
        tasklets.Future: Result is the RunQuery response
            (datastore_pb2.RunQueryResponse).
    """
    query_pb = _query_to_protobuf(query)
    partition_id = entity_pb2.PartitionId(project_id=query.project,
                                          namespace_id=query.namespace)
    read_options = _datastore_api.get_read_options(query)
    request = datastore_pb2.RunQueryRequest(
        project_id=query.project,
        partition_id=partition_id,
        query=query_pb,
        read_options=read_options,
    )
    response = yield _datastore_api.make_call("RunQuery",
                                              request,
                                              timeout=query.timeout)
    utils.logging_debug(log, response)
    raise tasklets.Return(response)
Example #10
 def some_task(transaction, future):
     assert context_module.get_context().transaction == transaction
     yield future
     raise tasklets.Return(context_module.get_context().transaction)
Example #11
 def generator(dependency):
     value = yield dependency
     raise tasklets.Return(value + 3)
Example #12
 def generator_function(dependencies):
     one, two = yield dependencies
     raise tasklets.Return(one + two)
Example #13
 def generator_function():
     yield
     raise tasklets.Return(42)
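Even this trivial tasklet is driven the same way as the others; a sketch, assuming the tasklet decorator has been applied:

    future = generator_function()  # returns a Future immediately
    assert future.result() == 42   # runs the event loop to completion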
Example #14
 def callback():
     result = yield tasklet_future
     raise tasklets.Return(result)
Example #15
 def generator_function(dependency, error_handler):
     try:
         yield dependency
     except Exception:
         result = yield error_handler
         raise tasklets.Return(result)
Example #16
 def regular_function(value):
     raise tasklets.Return(value + 3)
Example #17
 def generator_function(value):
     future = tasklets.Future(value)
     future.set_result(value)
     x = yield future
     raise tasklets.Return(x + 3)
Example #18
def _transaction_async(context, callback, read_only=False):
    # Avoid circular import in Python 2.7
    from google.cloud.ndb import _cache
    from google.cloud.ndb import _datastore_api

    # Start the transaction
    utils.logging_debug(log, "Start transaction")
    transaction_id = yield _datastore_api.begin_transaction(read_only,
                                                            retries=0)
    utils.logging_debug(log, "Transaction Id: {}", transaction_id)

    on_commit_callbacks = []
    tx_context = context.new(
        transaction=transaction_id,
        on_commit_callbacks=on_commit_callbacks,
        batches=None,
        commit_batches=None,
        cache=None,
        # We could just pass `None` here and let the `Context` constructor
        # instantiate a new event loop, but our unit tests inject a subclass of
        # `EventLoop` that makes testing a little easier. This makes sure the
        # new event loop is of the same type as the current one, to propagate
        # the event loop class used for testing.
        eventloop=type(context.eventloop)(),
        retry=context.get_retry_state(),
    )

    # The outer loop is dependent on the inner loop
    def run_inner_loop(inner_context):
        with inner_context.use():
            if inner_context.eventloop.run1():
                return True  # schedule again

    context.eventloop.add_idle(run_inner_loop, tx_context)

    tx_context.global_cache_flush_keys = flush_keys = set()
    with tx_context.use():
        try:
            # Run the callback
            result = callback()
            if isinstance(result, tasklets.Future):
                result = yield result

            # Make sure we've run everything we can run before calling commit
            _datastore_api.prepare_to_commit(transaction_id)
            tx_context.eventloop.run()

            # Commit the transaction
            yield _datastore_api.commit(transaction_id, retries=0)

        # Rollback if there is an error
        except Exception as e:
            tx_context.cache.clear()
            yield _datastore_api.rollback(transaction_id)
            raise e

        # Flush keys of entities written during the transaction from the global cache
        if flush_keys:
            yield [_cache.global_delete(key) for key in flush_keys]

        for callback in on_commit_callbacks:
            callback()

        raise tasklets.Return(result)
Example #19
 def generator_function(dependency):
     value = yield dependency
     raise tasklets.Return(value + 42)
Example #20
    def has_next_async(self):
        """Implements :meth:`QueryIterator.has_next_async`."""
        if self._next_result:
            raise tasklets.Return(True)

        if not self._result_sets:
            raise tasklets.Return(False)

        if self._limit == 0:
            raise tasklets.Return(False)

        # Actually get the next result and load it into memory, or else we
        # can't really know
        while True:
            has_nexts = yield [
                result_set.has_next_async() for result_set in self._result_sets
            ]

            self._result_sets = result_sets = [
                result_set
                for i, result_set in enumerate(self._result_sets)
                if has_nexts[i]
            ]

            if not result_sets:
                raise tasklets.Return(False)

            # If sorting, peek at the next values from all result sets and take
            # the minimum.
            if self._sortable:
                min_index, min_value = 0, result_sets[0]._peek()
                for i, result_set in enumerate(result_sets[1:], 1):
                    value = result_set._peek()
                    if value < min_value:
                        min_value = value
                        min_index = i

                next_result = result_sets[min_index].next()

            # If not sorting, take the next result from the first result set.
            # Will exhaust each result set in turn.
            else:
                next_result = result_sets[0].next()

            # Check to see if it's a duplicate
            hash_key = next_result.result_pb.entity.key.SerializeToString()
            if hash_key in self._seen_keys:
                continue

            # Not a duplicate
            self._seen_keys.add(hash_key)

            # Offset?
            if self._offset:
                self._offset -= 1
                continue

            # Limit?
            if self._limit:
                self._limit -= 1

            self._next_result = next_result

            raise tasklets.Return(True)
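The sortable branch above is a k-way merge: it peeks at the head of every live result set and takes the minimum, which keeps the merged stream globally ordered as long as each underlying stream is ordered. The same idea in plain Python, using only the standard library:

    import heapq

    def merge_sorted(streams):
        # Lazily merge already-sorted iterables into one sorted iterator.
        return heapq.merge(*streams)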
Example #21
 def iterate():
     results = []
     while (yield iterator.has_next_async()):
         results.append(iterator.next())
     raise tasklets.Return(results)
Example #22
def lookup(key, options):
    """Look up a Datastore entity.

    Gets an entity from Datastore, asynchronously. Checks the global cache,
    first, if appropriate. Uses batching.

    Args:
        key (~datastore.Key): The key for the entity to retrieve.
        options (_options.ReadOptions): The options for the request. For
            example, ``{"read_consistency": EVENTUAL}``.

    Returns:
        :class:`~tasklets.Future`: If not an exception, future's result will be
            either an entity protocol buffer or _NOT_FOUND.
    """
    context = context_module.get_context()
    use_datastore = context._use_datastore(key, options)
    if use_datastore and options.transaction:
        use_global_cache = False
    else:
        use_global_cache = context._use_global_cache(key, options)

    if not (use_global_cache or use_datastore):
        raise TypeError("use_global_cache and use_datastore can't both be False")

    entity_pb = _NOT_FOUND
    key_locked = False

    if use_global_cache:
        cache_key = _cache.global_cache_key(key)
        result = yield _cache.global_get(cache_key)
        key_locked = _cache.is_locked_value(result)
        if not key_locked:
            if result:
                entity_pb = entity_pb2.Entity()
                entity_pb.MergeFromString(result)

            elif use_datastore:
                lock = yield _cache.global_lock_for_read(cache_key, result)
                if lock:
                    yield _cache.global_watch(cache_key, lock)

                else:
                    # Another thread locked or wrote to this key after the call to
                    # _cache.global_get above. Behave as though the key was locked by
                    # another thread and don't attempt to write our value below
                    key_locked = True

    if entity_pb is _NOT_FOUND and use_datastore:
        batch = _batch.get_batch(_LookupBatch, options)
        entity_pb = yield batch.add(key)

        # Do not cache misses
        if use_global_cache and not key_locked:
            if entity_pb is not _NOT_FOUND:
                expires = context._global_cache_timeout(key, options)
                serialized = entity_pb.SerializeToString()
                yield _cache.global_compare_and_swap(
                    cache_key, serialized, expires=expires
                )
            else:
                yield _cache.global_unwatch(cache_key)

    raise tasklets.Return(entity_pb)
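The global-cache steps in lookup amount to optimistic locking; a condensed sketch of the happy path, reusing the _cache helpers and variables from the example (this would run inside a tasklet):

    lock = yield _cache.global_lock_for_read(cache_key, None)  # reserve the key
    yield _cache.global_watch(cache_key, lock)                 # watch for other writers
    entity_pb = yield batch.add(key)                           # read from Datastore
    yield _cache.global_compare_and_swap(                      # write only if untouched
        cache_key, entity_pb.SerializeToString(), expires=expires
    )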