Example #1
def init_fact_database(n=10):
    # Seed the datastore with n Fact entities, but only if none exist yet.
    if Fact.query().count(1) == 0:
        logging.warning('Bootstrapping facts')
        futures = []
        for i in range(n):
            # put_async() returns a Future; collect them so the caller can wait.
            futures.append(Fact(text='Fact %d' % i).put_async())
        raise tasklets.Return(futures)
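A minimal usage sketch, assuming init_fact_database is decorated with @ndb.tasklet and that a Fact model with a text property is defined elsewhere; calling the tasklet returns a Future whose result is the list of pending put futures:

# Hypothetical caller; everything other than init_fact_database is an assumption.
put_futures = init_fact_database(5).get_result()
if put_futures:
    ndb.Future.wait_all(put_futures)  # block until every bootstrap put has finished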
def fetch_data_async(blob, start_index, end_index, **options):
    """Async version of fetch_data()."""
    if isinstance(blob, BlobInfo):
        blob = blob.key()
    rpc = blobstore.create_rpc(**options)
    rpc = blobstore.fetch_data_async(blob, start_index, end_index, rpc=rpc)
    result = yield rpc
    raise tasklets.Return(result)
def create_upload_url_async(success_path,
                            max_bytes_per_blob=None,
                            max_bytes_total=None,
                            **options):
    """Async version of create_upload_url()."""
    rpc = blobstore.create_rpc(**options)
    rpc = blobstore.create_upload_url_async(
        success_path,
        max_bytes_per_blob=max_bytes_per_blob,
        max_bytes_total=max_bytes_total,
        rpc=rpc)
    result = yield rpc
    raise tasklets.Return(result)
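Both wrappers follow the same pattern: create a blobstore RPC, start the asynchronous call against it, yield the RPC to suspend the tasklet, and hand the result back with tasklets.Return. A hedged sketch of how they might be combined, assuming both are decorated as tasklets and blob_key names an existing blob:

# Hypothetical caller; blob_key and the upload path are assumptions.
url_future = create_upload_url_async('/upload_done')
data_future = fetch_data_async(blob_key, 0, 1023)
upload_url = url_future.get_result()
first_kb = data_future.get_result()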
Example #4
    def _load_from_cache_if_available(self, key):
        """Returns a cached Model instance given the entity key if available.

        Args:
          key: Key instance.

        Returns:
          A Model instance if the key exists in the cache.
        """
        if key in self._cache:
            entity = self._cache[key]  # May be None, meaning the key is known not to exist.
            if entity is None or entity._key == key:
                # Only use the cached value if its key has not changed since it was cached.
                raise tasklets.Return(entity)
Example #5
    def urlfetch(self,
                 url,
                 payload=None,
                 method='GET',
                 headers={},
                 allow_truncated=False,
                 follow_redirects=True,
                 validate_certificate=None,
                 deadline=None,
                 callback=None):
        # Issue an asynchronous urlfetch RPC, yield until it completes,
        # and return the fetch result to the calling tasklet.
        rpc = urlfetch.create_rpc(deadline=deadline, callback=callback)
        urlfetch.make_fetch_call(rpc,
                                 url,
                                 payload=payload,
                                 method=method,
                                 headers=headers,
                                 allow_truncated=allow_truncated,
                                 follow_redirects=follow_redirects,
                                 validate_certificate=validate_certificate)
        result = yield rpc
        raise tasklets.Return(result)
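A hedged usage sketch, assuming this urlfetch method lives on the tasklet-decorated ndb Context; because each call returns a Future, several fetches can be issued in parallel and collected afterwards:

# Hypothetical caller; the URLs are illustrative only.
ctx = ndb.get_context()
futures = [ctx.urlfetch(u) for u in ('http://example.com/a', 'http://example.com/b')]
responses = [f.get_result() for f in futures]  # urlfetch results with .content and .status_code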
Example #6
    def transaction(self, callback, **ctx_options):
        # Run callback() inside a datastore transaction, honoring the requested
        # propagation mode and retrying the transaction on contention.
        options = _make_ctx_options(ctx_options, TransactionOptions)
        propagation = TransactionOptions.propagation(options)
        if propagation is None:
            propagation = TransactionOptions.NESTED

        mode = datastore_rpc.TransactionMode.READ_WRITE
        if ctx_options.get('read_only', False):
            mode = datastore_rpc.TransactionMode.READ_ONLY

        parent = self
        if propagation == TransactionOptions.NESTED:
            if self.in_transaction():
                raise datastore_errors.BadRequestError(
                    'Nested transactions are not supported.')
        elif propagation == TransactionOptions.MANDATORY:
            if not self.in_transaction():
                raise datastore_errors.BadRequestError(
                    'Requires an existing transaction.')
            result = callback()
            if isinstance(result, tasklets.Future):
                result = yield result
            raise tasklets.Return(result)
        elif propagation == TransactionOptions.ALLOWED:
            if self.in_transaction():
                result = callback()
                if isinstance(result, tasklets.Future):
                    result = yield result
                raise tasklets.Return(result)
        elif propagation == TransactionOptions.INDEPENDENT:
            while parent.in_transaction():
                parent = parent._parent_context
                if parent is None:
                    raise datastore_errors.BadRequestError(
                        'Context without non-transactional ancestor')
        else:
            raise datastore_errors.BadArgumentError(
                'Invalid propagation value (%s).' % (propagation, ))

        app = TransactionOptions.app(options) or key_module._DefaultAppId()

        retries = TransactionOptions.retries(options)
        if retries is None:
            retries = 3
        yield parent.flush()

        transaction = None
        tconn = None
        for _ in range(1 + max(0, retries)):
            previous_transaction = (
                transaction
                if mode == datastore_rpc.TransactionMode.READ_WRITE
                else None)
            transaction = yield (parent._conn.async_begin_transaction(
                options, app, previous_transaction, mode))
            tconn = datastore_rpc.TransactionalConnection(
                adapter=parent._conn.adapter,
                config=parent._conn.config,
                transaction=transaction,
                _api_version=parent._conn._api_version)
            tctx = parent.__class__(
                conn=tconn,
                auto_batcher_class=parent._auto_batcher_class,
                parent_context=parent)
            tctx._old_ds_conn = datastore._GetConnection()
            ok = False
            try:
                # Make the transactional context and connection current for the
                # callback, copying memcache policies from the parent context.
                tctx.set_memcache_policy(parent.get_memcache_policy())
                tctx.set_memcache_timeout_policy(
                    parent.get_memcache_timeout_policy())
                tasklets.set_context(tctx)
                datastore._SetConnection(tconn)
                try:
                    try:
                        result = callback()
                        if isinstance(result, tasklets.Future):
                            result = yield result
                    finally:
                        yield tctx.flush()
                except GeneratorExit:
                    raise
                except Exception:
                    t, e, tb = sys.exc_info()
                    tconn.async_rollback(options)
                    if issubclass(t, datastore_errors.Rollback):
                        # An explicit Rollback ends the transaction quietly, without retrying.
                        return
                    else:
                        six.reraise(t, e, tb)
                else:
                    ok = yield tconn.async_commit(options)
                    if ok:
                        parent._cache.update(tctx._cache)
                        yield parent._clear_memcache(tctx._cache)
                        raise tasklets.Return(result)

            finally:
                datastore._SetConnection(tctx._old_ds_conn)
                del tctx._old_ds_conn
                if ok:
                    # The commit succeeded; run any callbacks queued during the transaction.
                    for on_commit_callback in tctx._on_commit_queue:
                        on_commit_callback()

        tconn.async_rollback(options)
        raise datastore_errors.TransactionFailedError(
            'The transaction could not be committed. Please try again.')
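A hedged sketch of the public API that funnels into this method; the Account model and the keys are assumptions. ndb.transaction() runs a function inside a transaction and accepts the same options, such as retries and propagation:

# Hypothetical money transfer run inside a transaction.
def transfer(src_key, dst_key, amount):
    src, dst = src_key.get(), dst_key.get()
    src.balance -= amount
    dst.balance += amount
    ndb.put_multi([src, dst])

ndb.transaction(lambda: transfer(account_a, account_b, 10), retries=3)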
Example #7
    def get_indexes(self, **ctx_options):
        options = _make_ctx_options(ctx_options)
        index_list = yield self._conn.async_get_indexes(options)
        raise tasklets.Return(index_list)
Example #8
    def allocate_ids(self, key, size=None, max=None, **ctx_options):
        options = _make_ctx_options(ctx_options)
        lo_hi = yield self._conn.async_allocate_ids(options, key, size, max)
        raise tasklets.Return(lo_hi)
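A hedged usage sketch of the corresponding model-level call; Fact is the assumed model from Example #1. Model.allocate_ids() reserves an id range without writing anything and returns the first and last Key of the block:

# Reserve a block of 10 ids for Fact entities.
first_key, last_key = Fact.allocate_ids(10)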
Example #9
    def put(self, entity, **ctx_options):
        options = _make_ctx_options(ctx_options)

        key = entity._key
        if key is None:
            # No key yet: build a placeholder Key so the policy checks below can run.
            key = model.Key(entity.__class__, None)
        use_datastore = self._use_datastore(key, options)
        use_memcache = None
        memcache_deadline = None

        if entity._has_complete_key():
            use_memcache = self._use_memcache(key, options)
            if use_memcache:
                # Either lock the memcache entry (datastore path) or write the
                # serialized entity to memcache directly (memcache-only path).
                memcache_deadline = self._get_memcache_deadline(options)
                mkey = self._memcache_prefix + key.urlsafe()
                ns = key.namespace()
                if use_datastore:
                    yield self.memcache_set(mkey,
                                            _LOCKED,
                                            time=_LOCK_TIME,
                                            namespace=ns,
                                            use_cache=True,
                                            deadline=memcache_deadline)
                else:
                    pbs = entity._to_pb(
                        set_key=False).SerializePartialToString()
                    # Check the size up front: memcache rejects oversized values.
                    if len(pbs) > memcache.MAX_VALUE_SIZE:
                        raise ValueError(
                            'Values may not be more than %d bytes in length; '
                            'received %d bytes' %
                            (memcache.MAX_VALUE_SIZE, len(pbs)))
                    timeout = self._get_memcache_timeout(key, options)
                    yield self.memcache_set(mkey,
                                            pbs,
                                            time=timeout,
                                            namespace=ns,
                                            deadline=memcache_deadline)

        if use_datastore:
            key = yield self._put_batcher.add(entity, options)
            if not isinstance(self._conn,
                              datastore_rpc.TransactionalConnection):
                if use_memcache is None:
                    use_memcache = self._use_memcache(key, options)
                if use_memcache:
                    mkey = self._memcache_prefix + key.urlsafe()
                    ns = key.namespace()
                    # Remove the memcache entry (the lock or any stale copy)
                    # now that the datastore write is done.
                    yield self.memcache_delete(mkey,
                                               namespace=ns,
                                               deadline=memcache_deadline)

        if key is not None:
            if entity._key != key:
                logging.info('replacing key %s with %s', entity._key, key)
                entity._key = key

            if self._use_cache(key, options):
                # Record the entity in the in-process context cache under its final key.
                self._cache[key] = entity

        raise tasklets.Return(key)
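A hedged sketch of the public call that routes through this method; Fact is the assumed model from Example #1. put_async() returns a Future whose result is the entity's final Key:

fact = Fact(text='Fact 42')
future = fact.put_async()         # handled by the current context's put()
stored_key = future.get_result()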
Example #10
    def get(self, key, **ctx_options):
        """Return a Model instance given the entity key.

    It will use the context cache if the cache policy for the given
    key is enabled.

    Args:
      key: Key instance.
      **ctx_options: Context options.

    Returns:
      A Model instance if the key exists in the datastore; None otherwise.
    """
        options = _make_ctx_options(ctx_options)
        use_cache = self._use_cache(key, options)
        if use_cache:
            self._load_from_cache_if_available(key)

        use_datastore = self._use_datastore(key, options)
        if (use_datastore and isinstance(
                self._conn, datastore_rpc.TransactionalConnection)):
            use_memcache = False
        else:
            use_memcache = self._use_memcache(key, options)
        ns = key.namespace()
        memcache_deadline = None

        if use_memcache:
            mkey = self._memcache_prefix + key.urlsafe()
            memcache_deadline = self._get_memcache_deadline(options)
            mvalue = yield self.memcache_get(mkey,
                                             for_cas=use_datastore,
                                             namespace=ns,
                                             use_cache=True,
                                             deadline=memcache_deadline)

            # The context cache may have been filled in while the memcache RPC was pending.
            if use_cache:
                self._load_from_cache_if_available(key)  # May raise tasklets.Return.
            if mvalue not in (_LOCKED, None):
                cls = model.Model._lookup_model(
                    key.kind(), self._conn.adapter.default_model)
                pb = entity_pb2.EntityProto()

                try:
                    pb.MergeFromString(mvalue)
                except message.DecodeError:
                    logging.warning(
                        'Corrupt memcache entry found '
                        'with key %s and namespace %s', mkey, ns)
                    mvalue = None
                else:
                    entity = cls._from_pb(pb)
                    # The cached protobuf was stored without its key, so restore it.
                    entity._key = key
                    if use_cache:
                        # Refresh the in-process context cache as well.
                        self._cache[key] = entity
                    raise tasklets.Return(entity)

            # On a miss, lock the memcache slot and read it back with CAS so the
            # later memcache_cas() only repopulates it if nothing changed meanwhile.
            if mvalue is None and use_datastore:
                yield self.memcache_set(mkey,
                                        _LOCKED,
                                        time=_LOCK_TIME,
                                        namespace=ns,
                                        use_cache=True,
                                        deadline=memcache_deadline)
                yield self.memcache_gets(mkey,
                                         namespace=ns,
                                         use_cache=True,
                                         deadline=memcache_deadline)

        if not use_datastore:
            # The context cache and memcache both missed, and the datastore is off-limits.
            raise tasklets.Return(None)

        if use_cache:
            entity = yield self._get_batcher.add_once(key, options)
        else:
            entity = yield self._get_batcher.add(key, options)

        if entity is not None:
            if use_memcache and mvalue != _LOCKED:
                # The key doubles as the memcache key, so it is not serialized.
                pbs = entity._to_pb(set_key=False).SerializePartialToString()
                # Only write back values small enough for memcache to accept.
                if len(pbs) <= memcache.MAX_VALUE_SIZE:
                    timeout = self._get_memcache_timeout(key, options)

                    yield self.memcache_cas(mkey,
                                            pbs,
                                            time=timeout,
                                            namespace=ns,
                                            deadline=memcache_deadline)

        if use_cache:
            # Cache the outcome, including None for a confirmed miss.
            self._cache[key] = entity

        raise tasklets.Return(entity)
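A hedged usage sketch of the public API built on this method; the keys are illustrative. key.get_async() returns a Future, so several lookups can overlap instead of running serially:

futures = [k.get_async() for k in (key1, key2, key3)]  # key1..key3 are assumed Keys
entities = [f.get_result() for f in futures]           # None for any key that does not exist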
Example #11
def randomize_rating(f):
    # Assign a fresh rating drawn from a normal distribution and return the
    # pending put Future so the caller can decide when to wait on it.
    f.elo_rating = random.normalvariate(400, 20)
    raise tasklets.Return(f.put_async())
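A hedged usage sketch, assuming randomize_rating is decorated with @ndb.tasklet and that the entities come from a hypothetical Player model with an elo_rating property; Query.map() applies the callback to every query result and, for a tasklet callback, waits on the futures it returns:

# Hypothetical bulk update; Player is not defined in the example above.
Player.query().map(randomize_rating)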