@gen.coroutine
def create(self, project_id):
  tr = self._db.create_transaction()
  tx_dir = yield self._tx_metadata(tr, project_id)
  # A random scatter byte spreads concurrent transactions across the key
  # space instead of hotspotting a single range.
  scatter_val = random.randint(0, 15)
  tr.set_versionstamped_key(tx_dir.encode_start_key(scatter_val), b'')
  versionstamp_future = tr.get_versionstamp()
  yield self._tornado_fdb.commit(tr)
  # The commit versionstamp is only known once the commit has succeeded.
  txid = TransactionID.encode(scatter_val, versionstamp_future.wait().value)
  raise gen.Return(txid)

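# Purely illustrative: one way a txid handle might pack the 4-bit scatter
# value together with the 10-byte commit versionstamp. The real encoding
# lives in TransactionID; the class name and bit layout below are
# assumptions, not the project's actual implementation.
import struct


class IllustrativeTransactionID(object):
  @staticmethod
  def encode(scatter_val, versionstamp):
    # Top bits carry the scatter value, low 80 bits the versionstamp.
    vs_int = (struct.unpack('>Q', versionstamp[:8])[0] << 16 |
              struct.unpack('>H', versionstamp[8:10])[0])
    return (scatter_val << 80) | vs_int

  @staticmethod
  def decode(txid):
    vs_int = txid & ((1 << 80) - 1)
    versionstamp = (struct.pack('>Q', vs_int >> 16) +
                    struct.pack('>H', vs_int & 0xFFFF))
    return txid >> 80, versionstamp
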
@gen.coroutine
def get_metadata(self, tr, project_id, txid):
  tx_dir = yield self._tx_metadata(tr, project_id)
  results = yield ResultIterator(
    tr, self._tornado_fdb, tx_dir.get_txid_slice(txid)).list()
  scatter_val, tx_start_versionstamp = TransactionID.decode(txid)
  # The first result must be the start key written by create(); if it is
  # missing, the transaction never existed or was garbage collected.
  if (not results or
      results[0].key != tx_dir.encode_start_key(scatter_val,
                                                tx_start_versionstamp)):
    raise BadRequest(u'Transaction not found')

  raise gen.Return(tx_dir.decode_metadata(txid, results[1:]))

@gen.coroutine
def apply_txn_changes(self, project_id, txid, retries=5):
  logger.debug(u'Applying {}:{}'.format(project_id, txid))
  project_id = decode_str(project_id)
  tr = self._db.create_transaction()
  read_versionstamp = TransactionID.decode(txid)[1]
  lookups, queried_groups, mutations = yield self._tx_manager.get_metadata(
    tr, project_id, txid)
  try:
    writes = yield self._apply_mutations(
      tr, project_id, queried_groups, mutations, lookups, read_versionstamp)
  finally:
    # Remove the transaction metadata whether or not the mutations applied.
    yield self._tx_manager.delete(tr, project_id, txid)

  versionstamp_future = None
  old_entries = [old_entry for old_entry, _, _ in writes
                 if old_entry.present]
  if old_entries:
    versionstamp_future = tr.get_versionstamp()

  try:
    yield self._tornado_fdb.commit(tr, convert_exceptions=False)
  except fdb.FDBError as fdb_error:
    # Only retry on a transaction conflict; surface everything else.
    if fdb_error.code != FDBErrorCodes.NOT_COMMITTED:
      raise InternalError(fdb_error.description)

    retries -= 1
    if retries < 0:
      raise InternalError(fdb_error.description)

    yield self.apply_txn_changes(project_id, txid, retries)
    return

  if old_entries:
    self._gc.clear_later(old_entries, versionstamp_future.wait().value)

  mutations = [(old_entry, FDBDatastore._filter_version(new_entry),
                index_stats)
               for old_entry, new_entry, index_stats in writes
               if index_stats is not None]
  IOLoop.current().spawn_callback(self._stats_buffer.update, project_id,
                                  mutations)

  logger.debug(u'Finished applying {}:{}'.format(project_id, txid))

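# FoundationDB reports a read-write conflict by failing the commit with
# error code 1020 ("not_committed"). A minimal sketch of the constant the
# retry loop above checks against, assuming FDBErrorCodes is a plain holder:
class FDBErrorCodes(object):
  NOT_COMMITTED = 1020
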
@gen.coroutine
def dynamic_get(self, project_id, get_request, get_response):
  logger.debug(u'get_request:\n{}'.format(get_request))
  project_id = decode_str(project_id)
  tr = self._db.create_transaction()

  read_versionstamp = None
  if get_request.has_transaction():
    yield self._tx_manager.log_lookups(tr, project_id, get_request)

    # Ensure the GC hasn't cleaned up an entity written after the tx start.
    safe_read_stamps = yield [self._gc.safe_read_versionstamp(tr, key)
                              for key in get_request.key_list()]
    safe_read_stamps = [vs for vs in safe_read_stamps if vs is not None]
    read_versionstamp = TransactionID.decode(
      get_request.transaction().handle())[1]
    if any(safe_versionstamp > read_versionstamp
           for safe_versionstamp in safe_read_stamps):
      raise BadRequest(u'The specified transaction has expired')

  futures = []
  for key in get_request.key_list():
    futures.append(self._data_manager.get_latest(tr, key, read_versionstamp,
                                                 snapshot=True))

  version_entries = yield futures

  # If this read is in a transaction, logging the RPC is a mutation.
  yield self._tornado_fdb.commit(tr)

  for entry in version_entries:
    response_entity = get_response.add_entity()
    response_entity.set_version(entry.version)
    if entry.has_entity:
      response_entity.mutable_entity().MergeFrom(entry.decoded)
    else:
      response_entity.mutable_key().MergeFrom(entry.key)

  logger.debug(u'fetched paths: {}'.format(
    [entry.path for entry in version_entries if entry.has_entity]))

@gen.coroutine
def apply_txn_changes(self, project_id, txid, retries=5):
  logger.debug(u'Applying {}:{}'.format(project_id, txid))
  project_id = decode_str(project_id)
  tr = self._db.create_transaction()
  read_versionstamp = TransactionID.decode(txid)[1]
  lookups, queried_groups, mutations = yield self._tx_manager.get_metadata(
    tr, project_id, txid)
  try:
    old_entries = yield self._apply_mutations(
      tr, project_id, queried_groups, mutations, lookups, read_versionstamp)
  finally:
    yield self._tx_manager.delete(tr, project_id, txid)

  versionstamp_future = None
  if old_entries:
    versionstamp_future = tr.get_versionstamp()

  try:
    yield self._tornado_fdb.commit(tr, convert_exceptions=False)
  except fdb.FDBError as fdb_error:
    # Only retry on a transaction conflict; surface everything else.
    if fdb_error.code != FDBErrorCodes.NOT_COMMITTED:
      raise InternalError(fdb_error.description)

    retries -= 1
    if retries < 0:
      raise InternalError(fdb_error.description)

    yield self.apply_txn_changes(project_id, txid, retries)
    return

  if old_entries:
    self._gc.clear_later(old_entries, versionstamp_future.wait().value)

  logger.debug(u'Finished applying {}:{}'.format(project_id, txid))

def _txid_prefix(self, txid):
  scatter_val, commit_versionstamp = TransactionID.decode(txid)
  return (self.directory.rawPrefix + six.int2byte(scatter_val) +
          commit_versionstamp)

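# A sketch of how this prefix could back get_txid_slice() (used by
# get_metadata above): a range read covering every key the transaction
# logged under its prefix. The helper name comes from the calls above; the
# selector arithmetic is an assumption, though fdb.KeySelector itself is
# standard in the FoundationDB Python bindings.
def get_txid_slice(self, txid):
  prefix = self._txid_prefix(txid)
  return slice(fdb.KeySelector.first_greater_or_equal(prefix),
               fdb.KeySelector.first_greater_or_equal(prefix + b'\xff'))
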
@gen.coroutine
def _dynamic_run_query(self, query, query_result):
  logger.debug(u'query: {}'.format(query))
  project_id = decode_str(query.app())
  tr = self._db.create_transaction()
  read_versionstamp = None
  if query.has_transaction():
    yield self._tx_manager.log_query(tr, project_id, query)

    # Ensure the GC hasn't cleaned up an entity written after the tx start.
    safe_versionstamp = yield self._gc.safe_read_versionstamp(
      tr, query.ancestor())
    read_versionstamp = TransactionID.decode(
      query.transaction().handle())[1]
    if (safe_versionstamp is not None and
        safe_versionstamp > read_versionstamp):
      raise BadRequest(u'The specified transaction has expired')

  fetch_data = self._index_manager.include_data(query)
  rpc_limit, check_more_results = self._index_manager.rpc_limit(query)

  iterator = yield self._index_manager.get_iterator(
    tr, query, read_versionstamp)
  for prop_name in query.property_name_list():
    prop_name = decode_str(prop_name)
    if prop_name not in iterator.prop_names:
      raise BadRequest(
        u'Projections on {} are not supported'.format(prop_name))

  data_futures = [] if fetch_data else None
  unique_keys = set()
  results = []
  entries_fetched = 0
  skipped_results = 0
  cursor = None
  while True:
    remainder = rpc_limit - entries_fetched
    iter_offset = max(query.offset() - entries_fetched, 0)
    entries, more_iterator_results = yield iterator.next_page()
    entries_fetched += len(entries)
    if not entries and more_iterator_results:
      continue

    if not entries and not more_iterator_results:
      break

    skipped_results += min(len(entries), iter_offset)
    suitable_entries = entries[iter_offset:remainder]
    if entries[:remainder]:
      cursor = entries[:remainder][-1]

    if not fetch_data and not query.keys_only():
      results.extend([entry.prop_result() for entry in suitable_entries])
      continue

    for entry in suitable_entries:
      if entry.path in unique_keys:
        continue

      unique_keys.add(entry.path)
      if fetch_data:
        data_futures.append(
          self._data_manager.get_entry(tr, entry, snapshot=True))
      else:
        results.append(entry.key_result())

    if not more_iterator_results:
      break

  if fetch_data:
    entity_results = yield data_futures
    results = [entity.encoded for entity in entity_results]
  else:
    results = [result.Encode() for result in results]

  yield self._tornado_fdb.commit(tr)

  query_result.result_list().extend(results)

  # TODO: Figure out how ndb multi queries use compiled cursors.
  if query.compile():
    ordered_props = tuple(prop_name for prop_name, _ in get_order_info(query)
                          if prop_name != KEY_PROP)
    mutable_cursor = query_result.mutable_compiled_cursor()
    if cursor is not None:
      mutable_cursor.MergeFrom(cursor.cursor_result(ordered_props))

  more_results = check_more_results and entries_fetched > rpc_limit
  query_result.set_more_results(more_results)

  if skipped_results:
    query_result.set_skipped_results(skipped_results)

  if query.keys_only():
    query_result.set_keys_only(True)

  logger.debug(u'{} results'.format(len(query_result.result_list())))