def _generate_test(self):
    """Build the instruction sets for this test run.

    Returns a dict mapping an fdb.Subspace (the keyspace a tester reads its
    instructions from) to the instruction set to store there.  With
    concurrency > 1, a main thread is emitted that only spawns one
    START_THREAD per worker, and each worker gets its own subspace.
    """
    util.get_logger().info(
        'Generating %s test at seed %d with %d op(s) and %d concurrent tester(s)...'
        % (self.args.test_name, self.args.seed, self.args.num_ops, self.args.concurrency))

    # Seed once so the generated instruction stream is reproducible from test_seed.
    random.seed(self.test_seed)

    if self.args.concurrency == 1:
        self.test.setup(self.args)
        test_instructions = {
            fdb.Subspace((bytes(self.args.instruction_prefix, 'utf-8'), )):
                self.test.generate(self.args, 0)
        }
    else:
        test_instructions = {}
        main_thread = InstructionSet()
        for i in range(self.args.concurrency):
            # thread_spec = fdb.Subspace(('thread_spec', i))
            thread_spec = b'thread_spec%d' % i
            main_thread.push_args(thread_spec)
            main_thread.append('START_THREAD')
            # NOTE(review): setup() is re-run before generating each worker's
            # instructions — presumably to reset per-generation state; confirm.
            self.test.setup(self.args)
            test_instructions[fdb.Subspace((thread_spec, ))] = self.test.generate(self.args, i)

        # The spawning main thread's instructions live under the instruction prefix itself.
        test_instructions[fdb.Subspace((bytes(self.args.instruction_prefix, 'utf-8'), ))] = main_thread

    return test_instructions
def __init__(self, args):
    """Prepare a test run: open the database, resolve the testers and test,
    and reconcile API version / int width / threading constraints across them.

    Raises Exception when the named test is unknown, the requested int width
    exceeds what the testers support, or concurrency is requested without
    thread support.
    """
    self.args = copy.copy(args)

    self.db = fdb.open(self.args.cluster_file)
    # Recorded so a failing run can be reproduced from its seed.
    self.test_seed = random.randint(0, 0xffffffff)

    self.testers = [Tester.get_test(self.args.test1)]
    if self.args.test2 is not None:
        self.testers.append(Tester.get_test(self.args.test2))

    self.test = Test.create_test(self.args.test_name,
                                 fdb.Subspace((self.args.output_subspace,)))
    if self.test is None:
        raise Exception('the test \'%s\' could not be found' % self.args.test_name)

    # The chosen API version must fall inside every tester's supported window
    # as well as the test's own window.
    min_api_version = max([tester.min_api_version for tester in self.testers])
    max_api_version = min([tester.max_api_version for tester in self.testers])
    self.args.api_version = choose_api_version(self.args.api_version, min_api_version, max_api_version,
                                               self.test.min_api_version, self.test.max_api_version)

    util.get_logger().info('\nCreating test at API version %d' % self.args.api_version)

    # Integers wider than the narrowest tester's limit cannot round-trip.
    max_int_bits = min([tester.max_int_bits for tester in self.testers])
    if self.args.max_int_bits is None:
        self.args.max_int_bits = max_int_bits
    elif self.args.max_int_bits > max_int_bits:
        raise Exception('The specified testers support at most %d-bit ints, but --max-int-bits was set to %d'
                        % (max_int_bits, self.args.max_int_bits))

    self.args.no_threads = self.args.no_threads or any([not tester.threads_enabled for tester in self.testers])
    if self.args.no_threads and self.args.concurrency > 1:
        raise Exception('Not all testers support concurrency')

    # Test types should be intersection of all tester supported types
    # BUG FIX: materialize the reduce result with list().  In Python 3 a bare
    # filter object is lazy and has no __contains__, so with three or more
    # testers the next reduction step (t1.__contains__) raises TypeError, and
    # even with two testers later membership tests would consume the iterator.
    self.args.types = list(reduce(lambda t1, t2: filter(t1.__contains__, t2),
                                  map(lambda tester: tester.types, self.testers)))
def __init__(self, db, name):
    """Bind to the virtual block device *name* stored in *db*.

    Reads the device geometry (block size and block count) from the device's
    'meta' subspace and positions the cursor at offset 0.
    """
    self._db = db
    self._device = fdb.Subspace(('dev', name))
    meta = self._device['meta']
    # Geometry is persisted under the device's 'meta' keys.
    block_size = int(db[meta['block_size']])
    num_blocks = int(db[meta['num_blocks']])
    self._block_size = block_size
    self._size = block_size * num_blocks
    self._blocks = self._device['blocks']
    # Zero-filled block, used for regions that were never written.
    self._empty = '\0' * block_size
    self._pos = 0
def validate_hca_state(db):
    """Cross-check the high-contention allocator's bookkeeping.

    Compares the allocation count recorded in the HCA's most recent counter
    window against the number of allocation keys actually present in the
    'recent' subspace for that window.  Returns a list of error strings
    (empty when consistent).
    """
    # NOTE(review): '\xfe' string prefixes assume Python 2 byte-string
    # semantics; under Python 3 these are text keys — confirm target runtime.
    hca = fdb.Subspace(('\xfe', 'hca'), '\xfe')
    counters = hca[0]
    recent = hca[1]
    # Highest counter entry = (window start, count reported for that window);
    # default to (0, 0) when no counter entry exists yet.
    last_counter = db.get_range(counters.range().start, counters.range().stop, limit=1, reverse=True)
    [(start, reported_count)] = [(counters.unpack(kv.key)[0], struct.unpack('<q', kv.value)[0]) for kv in last_counter] or [(0, 0)]
    # Keys actually allocated since the window start.
    actual_count = len(db[recent[start]: recent.range().stop])
    if actual_count > reported_count:
        return ['The HCA reports %d prefixes allocated in current window, but it actually allocated %d' % (reported_count, actual_count)]
    return []
def generate_tsfdb_queues_metrics(db, timestamp):
    """Emit one InfluxDB line-protocol record with the item count of every
    registered queue plus the total number of queues.  Returns a one-element
    list, or an empty list if the database raises an error."""
    try:
        available_queues_subspace = fdb.Subspace(('available_queues', ))
        parts = ["queue,machine_id=tsfdb "]
        total = 0
        # enumerate(..., 1) leaves `total` equal to the number of queues seen.
        for total, (key, _) in enumerate(db[available_queues_subspace.range()], 1):
            queue_name = available_queues_subspace.unpack(key)[0]
            parts.append("%s=%d," % (queue_name, Queue(queue_name).count_items(db)))
        parts.append("count=%d %s" % (total, timestamp))
        return ["".join(parts)]
    except fdb.FDBError as err:
        print("ERROR: Could not get queues metrics: %s" % str(err.description, 'utf-8'))
        return []
def generate_tsfdb_queues_metrics(self):
    """Return a dict of per-queue item counts keyed as 'tsfdb.queue.<name>',
    plus 'tsfdb.queue.count' with the total number of queues.  Returns an
    empty dict if the database raises an error."""
    try:
        available_queues_subspace = fdb.Subspace(('available_queues', ))
        metrics = {}
        total = 0
        # enumerate(..., 1) leaves `total` equal to the number of queues seen.
        for total, (key, _) in enumerate(
                self.db[available_queues_subspace.range()], 1):
            queue_name = available_queues_subspace.unpack(key)[0]
            metrics[f"tsfdb.queue.{queue_name}"] = Queue(queue_name).count_items(self.db)
        metrics["tsfdb.queue.count"] = total
        return metrics
    except fdb.FDBError as err:
        print("ERROR: Could not get queues metrics: %s" % str(err.description, 'utf-8'))
        return {}
# limitations under the License. # import struct import fdb fdb.api_version(300) db = fdb.open() @fdb.transactional def clear_subspace(tr, subspace): tr.clear_range_startswith(subspace.key()) multi = fdb.Subspace(('M', )) clear_subspace(db, multi) # Multimaps with multiset values @fdb.transactional def multi_add(tr, index, value): tr.add(multi[index][value], struct.pack('<q', 1)) @fdb.transactional def multi_subtract(tr, index, value): v = tr[multi[index][value]] if v.present() and struct.unpack('<q', str(v))[0] > 1: tr.add(multi[index][value], struct.pack('<q', -1))
def process_instruction(self, inst):
    """Execute one DIRECTORY_* bindingtester instruction against the current
    directory.

    Operands come off the instruction's stack; results are pushed back on it.
    Any exception is caught: it is optionally logged, a None placeholder is
    appended for ops that would have created a directory entry, and
    b'DIRECTORY_ERROR' is pushed as the result.
    """
    try:
        if log_all or log_instructions:
            print("%d. %s" % (inst.index, inst.op))

        # The directory all path-relative ops act on.
        directory = self.dir_list[self.dir_index]
        if inst.op == six.u('DIRECTORY_CREATE_SUBSPACE'):
            path = self.pop_tuples(inst.stack)
            raw_prefix = inst.pop()
            log_op('created subspace at %r: %r' % (path, raw_prefix))
            self.append_dir(inst, fdb.Subspace(path, raw_prefix))
        elif inst.op == six.u('DIRECTORY_CREATE_LAYER'):
            index1, index2, allow_manual_prefixes = inst.pop(3)
            # Either referenced entry being None poisons the new layer to None.
            if self.dir_list[index1] is None or self.dir_list[index2] is None:
                log_op('create directory layer: None')
                self.append_dir(inst, None)
            else:
                log_op('create directory layer: node_subspace (%d) = %r, content_subspace (%d) = %r, allow_manual_prefixes = %d'
                       % (index1, self.dir_list[index1].rawPrefix, index2, self.dir_list[index2].rawPrefix, allow_manual_prefixes))
                self.append_dir(inst, fdb.DirectoryLayer(self.dir_list[index1], self.dir_list[index2], allow_manual_prefixes == 1))
        elif inst.op == six.u('DIRECTORY_CHANGE'):
            self.dir_index = inst.pop()
            # Fall back to the designated error entry when the target is unusable.
            if not self.dir_list[self.dir_index]:
                self.dir_index = self.error_index
            if log_dirs or log_all:
                new_dir = self.dir_list[self.dir_index]
                clazz = new_dir.__class__.__name__
                new_path = repr(new_dir._path) if hasattr(new_dir, '_path') else "<na>"
                print('changed directory to %d (%s @%r)' % (self.dir_index, clazz, new_path))
        elif inst.op == six.u('DIRECTORY_SET_ERROR_INDEX'):
            self.error_index = inst.pop()
        elif inst.op == six.u('DIRECTORY_CREATE_OR_OPEN'):
            path = self.pop_tuples(inst.stack)
            layer = inst.pop()
            log_op('create_or_open %r: layer=%r' % (directory.get_path() + path, layer))
            d = directory.create_or_open(inst.tr, path, layer or b'')
            self.append_dir(inst, d)
        elif inst.op == six.u('DIRECTORY_CREATE'):
            path = self.pop_tuples(inst.stack)
            layer, prefix = inst.pop(2)
            log_op('create %r: layer=%r, prefix=%r' % (directory.get_path() + path, layer, prefix))
            self.append_dir(inst, directory.create(inst.tr, path, layer or b'', prefix))
        elif inst.op == six.u('DIRECTORY_OPEN'):
            path = self.pop_tuples(inst.stack)
            layer = inst.pop()
            log_op('open %r: layer=%r' % (directory.get_path() + path, layer))
            self.append_dir(inst, directory.open(inst.tr, path, layer or b''))
        elif inst.op == six.u('DIRECTORY_MOVE'):
            old_path, new_path = self.pop_tuples(inst.stack, 2)
            log_op('move %r to %r' % (directory.get_path() + old_path, directory.get_path() + new_path))
            self.append_dir(inst, directory.move(inst.tr, old_path, new_path))
        elif inst.op == six.u('DIRECTORY_MOVE_TO'):
            new_absolute_path = self.pop_tuples(inst.stack)
            log_op('move %r to %r' % (directory.get_path(), new_absolute_path))
            self.append_dir(inst, directory.move_to(inst.tr, new_absolute_path))
        elif inst.op == six.u('DIRECTORY_REMOVE'):
            # count == 0 targets the current directory, otherwise a sub-path.
            count = inst.pop()
            if count == 0:
                log_op('remove %r' % (directory.get_path(), ))
                directory.remove(inst.tr)
            else:
                path = self.pop_tuples(inst.stack)
                log_op('remove %r' % (directory.get_path() + path, ))
                directory.remove(inst.tr, path)
        elif inst.op == six.u('DIRECTORY_REMOVE_IF_EXISTS'):
            count = inst.pop()
            if count == 0:
                log_op('remove_if_exists %r' % (directory.get_path(), ))
                directory.remove_if_exists(inst.tr)
            else:
                path = self.pop_tuples(inst.stack)
                log_op('remove_if_exists %r' % (directory.get_path() + path, ))
                directory.remove_if_exists(inst.tr, path)
        elif inst.op == six.u('DIRECTORY_LIST'):
            count = inst.pop()
            if count == 0:
                result = directory.list(inst.tr)
                log_op('list %r' % (directory.get_path(), ))
            else:
                path = self.pop_tuples(inst.stack)
                result = directory.list(inst.tr, path)
                log_op('list %r' % (directory.get_path() + path, ))
            # Child names are returned as one packed tuple.
            inst.push(fdb.tuple.pack(tuple(result)))
        elif inst.op == six.u('DIRECTORY_EXISTS'):
            count = inst.pop()
            if count == 0:
                result = directory.exists(inst.tr)
                log_op('exists %r: %d' % (directory.get_path(), result))
            else:
                path = self.pop_tuples(inst.stack)
                result = directory.exists(inst.tr, path)
                log_op('exists %r: %d' % (directory.get_path() + path, result))
            if result:
                inst.push(1)
            else:
                inst.push(0)
        elif inst.op == six.u('DIRECTORY_PACK_KEY'):
            key_tuple = self.pop_tuples(inst.stack)
            inst.push(directory.pack(key_tuple))
        elif inst.op == six.u('DIRECTORY_UNPACK_KEY'):
            key = inst.pop()
            log_op('unpack %r in subspace with prefix %r' % (key, directory.rawPrefix))
            tup = directory.unpack(key)
            # Each tuple element is pushed individually.
            for t in tup:
                inst.push(t)
        elif inst.op == six.u('DIRECTORY_RANGE'):
            tup = self.pop_tuples(inst.stack)
            rng = directory.range(tup)
            inst.push(rng.start)
            inst.push(rng.stop)
        elif inst.op == six.u('DIRECTORY_CONTAINS'):
            key = inst.pop()
            result = directory.contains(key)
            if result:
                inst.push(1)
            else:
                inst.push(0)
        elif inst.op == six.u('DIRECTORY_OPEN_SUBSPACE'):
            path = self.pop_tuples(inst.stack)
            log_op('open_subspace %r (at %r)' % (path, directory.key()))
            self.append_dir(inst, directory.subspace(path))
        elif inst.op == six.u('DIRECTORY_LOG_SUBSPACE'):
            # Record the current entry's raw key under prefix + dir_index.
            prefix = inst.pop()
            inst.tr[prefix + fdb.tuple.pack((self.dir_index, ))] = directory.key()
        elif inst.op == six.u('DIRECTORY_LOG_DIRECTORY'):
            # Record path/layer/existence/children of the current directory
            # under a subspace rooted at the popped prefix.
            prefix = inst.pop()
            exists = directory.exists(inst.tr)
            if exists:
                children = tuple(directory.list(inst.tr))
            else:
                children = ()
            logSubspace = fdb.Subspace((self.dir_index, ), prefix)
            inst.tr[logSubspace[six.u('path')]] = fdb.tuple.pack(directory.get_path())
            inst.tr[logSubspace[six.u('layer')]] = fdb.tuple.pack((directory.get_layer(), ))
            inst.tr[logSubspace[six.u('exists')]] = fdb.tuple.pack((int(exists), ))
            inst.tr[logSubspace[six.u('children')]] = fdb.tuple.pack(children)
        elif inst.op == six.u('DIRECTORY_STRIP_PREFIX'):
            s = inst.pop()
            if not s.startswith(directory.key()):
                raise Exception('String %r does not start with raw prefix %r' % (s, directory.key()))
            inst.push(s[len(directory.key()):])
        else:
            raise Exception('Unknown op: %s' % inst.op)
    except Exception as e:
        if log_all or log_errors:
            print(e)
            # traceback.print_exc(file=sys.stdout)

        # Ops that would have appended a directory entry still must append
        # something, so indices stay aligned across testers.
        if inst.op in ops_that_create_dirs:
            self.append_dir(inst, None)

        inst.push(b'DIRECTORY_ERROR')
# Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import fdb fdb.api_version(300) db = fdb.open() import itertools import json import random doc_space = fdb.Subspace(('D', )) EMPTY_OBJECT = -2 EMPTY_ARRAY = -1 def to_tuples(item): if item == {}: return [(EMPTY_OBJECT, None)] elif item == []: return [(EMPTY_ARRAY, None)] elif type(item) == dict: return [(k, ) + sub for k, v in item.iteritems() for sub in to_tuples(v)] elif type(item) == list: return [(k, ) + sub for k, v in enumerate(item)
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import fdb fdb.api_version(300) db = fdb.open() table = fdb.Subspace(('T', )) row_index = table['R'] col_index = table['C'] def _pack(value): return fdb.tuple.pack((value, )) def _unpack(value): return fdb.tuple.unpack(value)[0] @fdb.transactional def table_set_cell(tr, row, column, value): tr[row_index[row][column]] = _pack(value)
# limitations under the License. # import fdb fdb.api_version(300) db = fdb.open() @fdb.transactional def clear_subspace(tr, subspace): tr.clear_range_startswith(subspace.key()) CHUNK_LARGE = 5 blob = fdb.Subspace(('B', )) @fdb.transactional def write_blob(tr, data): if not len(data): return num_chunks = (len(data) + CHUNK_LARGE - 1) / CHUNK_LARGE chunk_size = (len(data) + num_chunks) / num_chunks chunks = [(n, n + chunk_size) for n in range(0, len(data), chunk_size)] for start, end in chunks: tr[blob[start]] = data[start:end] @fdb.transactional def read_blob(tr):
# limitations under the License. # import os import fdb fdb.api_version(300) db = fdb.open() @fdb.transactional def clear_subspace(tr, subspace): tr.clear_range_startswith(subspace.key()) queue = fdb.Subspace(('Q', )) clear_subspace(db, queue) @fdb.transactional def dequeue(tr): item = first_item(tr) if item is None: return None del tr[item.key] return item.value @fdb.transactional def enqueue(tr, value): tr[queue[last_index(tr) + 1][os.urandom(20)]] = value
def generate(self, args, thread_number):
    """Emit the scripted API test: a fixed instruction stream exercising
    transactions, gets/sets, ranges, key selectors, tuples, versionstamps,
    and (unless disabled) cross-thread coordination.

    Expected results are registered via self.add_result as the stream is
    built.  Returns the populated ThreadedInstructionSet.
    """
    self.results = []

    test_instructions = ThreadedInstructionSet()
    main_thread = test_instructions.create_thread()

    # Six keys foo0..foo5 inside this test's workspace.
    foo = [self.workspace.pack(('foo%d' % i, )) for i in range(0, 6)]

    # --- ON_ERROR / basic transaction behavior ---
    main_thread.append('NEW_TRANSACTION')
    main_thread.push_args(1020)
    main_thread.append('ON_ERROR')
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
    main_thread.append('GET_READ_VERSION')
    main_thread.push_args(foo[1], 'bar')
    main_thread.append('SET')
    main_thread.push_args(foo[1])
    main_thread.append('GET')
    self.add_result(main_thread, args, 'bar')
    test_util.blocking_commit(main_thread)
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

    main_thread.push_args(2000)
    main_thread.append('ON_ERROR')
    self.add_result(main_thread, args, test_util.error_string(2000))

    main_thread.append('NEW_TRANSACTION')
    main_thread.push_args(0)
    main_thread.append('ON_ERROR')
    self.add_result(main_thread, args, test_util.error_string(2000))

    # --- GET / GET_SNAPSHOT / GET_DATABASE and CLEAR visibility ---
    main_thread.append('NEW_TRANSACTION')
    main_thread.push_args(foo[1])
    main_thread.append('DUP')
    main_thread.append('DUP')
    main_thread.append('GET')
    self.add_result(main_thread, args, 'bar')
    main_thread.append('CLEAR')
    main_thread.append('GET_SNAPSHOT')
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
    main_thread.push_args(foo[1])
    main_thread.append('GET_DATABASE')
    self.add_result(main_thread, args, 'bar')
    test_util.blocking_commit(main_thread)
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

    main_thread.append('SET_READ_VERSION')
    main_thread.push_args(foo[1])
    main_thread.append('DUP')
    main_thread.append('GET')
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
    main_thread.append('CLEAR')
    test_util.blocking_commit(main_thread)
    self.add_result(main_thread, args, test_util.error_string(1020))

    main_thread.push_args(foo[1])
    main_thread.append('GET_SNAPSHOT')
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
    main_thread.push_args(foo[1])
    main_thread.append('CLEAR')
    main_thread.append('COMMIT')
    main_thread.append('WAIT_FUTURE')
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
    main_thread.append('GET_COMMITTED_VERSION')
    main_thread.append('RESET')
    main_thread.append('EMPTY_STACK')

    # --- populate foo1..foo5 (SWAP exercises stack manipulation) ---
    main_thread.append('NEW_TRANSACTION')
    main_thread.push_args(1, 'bar', foo[1], foo[2], 'bar2', foo[3], 'bar3', foo[4], 'bar4', foo[5], 'bar5')
    main_thread.append('SWAP')
    main_thread.append('SET')
    main_thread.append('SET')
    main_thread.append('SET')
    main_thread.append('SET')
    main_thread.append('SET_DATABASE')
    test_util.blocking_commit(main_thread)
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

    main_thread.append('SET_READ_VERSION')
    main_thread.push_args(foo[2])
    main_thread.append('GET')
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

    main_thread.append('NEW_TRANSACTION')
    main_thread.push_args('', 0, -1, '')
    main_thread.append('GET_KEY')
    self.add_result(main_thread, args, '')

    # --- range reads in all three flavors ---
    main_thread.append('NEW_TRANSACTION')
    main_thread.append('GET_READ_VERSION_SNAPSHOT')
    main_thread.push_args('random', foo[1], foo[3], 0, 1, 1)
    main_thread.append('POP')
    main_thread.append('GET_RANGE')
    self.add_result(main_thread, args, fdb.tuple.pack((foo[2], 'bar2', foo[1], 'bar')))
    main_thread.push_args(foo[1], foo[3], 1, 1, 0)
    main_thread.append('GET_RANGE_SNAPSHOT')
    self.add_result(main_thread, args, fdb.tuple.pack((foo[2], 'bar2')))
    main_thread.push_args(foo[1], foo[3], 0, 0, 4)
    main_thread.append('GET_RANGE_DATABASE')
    self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
    test_util.blocking_commit(main_thread)
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

    # --- key selectors over a partially-cleared range ---
    main_thread.push_args(foo[3], foo[5])
    main_thread.append('CLEAR_RANGE')
    main_thread.push_args(foo[1], 0, 3, '')
    main_thread.append('GET_KEY')
    self.add_result(main_thread, args, foo[5])
    main_thread.push_args(foo[1], 1, 2, '')
    main_thread.append('GET_KEY_SNAPSHOT')
    self.add_result(main_thread, args, foo[5])
    main_thread.push_args(foo[5], 0, -2, '')
    main_thread.append('GET_KEY_DATABASE')
    self.add_result(main_thread, args, foo[2])

    # --- prefix range reads ---
    main_thread.push_args(self.workspace.key(), 2, 0, 2)
    main_thread.append('GET_RANGE_STARTS_WITH')
    self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
    main_thread.push_args(self.workspace.key(), 4, 0, 3)
    main_thread.append('GET_RANGE_STARTS_WITH_SNAPSHOT')
    self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[5], 'bar5')))
    main_thread.push_args(self.workspace.key(), 3, 1, -1)
    main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
    self.add_result(main_thread, args, fdb.tuple.pack((foo[5], 'bar5', foo[4], 'bar4', foo[3], 'bar3')))

    # --- selector-bounded range reads ---
    main_thread.push_args(foo[1], 0, 1, foo[1], 0, 3, 0, 0, -1, '')
    main_thread.append('GET_RANGE_SELECTOR')
    self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2')))
    main_thread.push_args(foo[1], 1, 0, foo[1], 1, 3, 0, 0, -1, '')
    main_thread.append('GET_RANGE_SELECTOR_SNAPSHOT')
    self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[5], 'bar5')))
    main_thread.push_args(foo[1], 0, 1, foo[1], 1, 3, 0, 0, -1, '')
    main_thread.append('GET_RANGE_SELECTOR_DATABASE')
    self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[2], 'bar2', foo[3], 'bar3')))
    test_util.blocking_commit(main_thread)
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

    # --- clear the whole workspace and verify it is empty ---
    main_thread.push_args(self.workspace.key())
    main_thread.append('CLEAR_RANGE_STARTS_WITH')
    main_thread.push_args(self.workspace.key(), 0, 0, -1)
    main_thread.append('GET_RANGE_STARTS_WITH')
    self.add_result(main_thread, args, '')
    test_util.blocking_commit(main_thread)
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

    main_thread.append('SET_READ_VERSION')
    main_thread.push_args(foo[1])
    main_thread.append('GET')
    self.add_result(main_thread, args, 'bar')
    test_util.blocking_commit(main_thread)
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

    # --- repopulate, then exercise the _DATABASE clear variants ---
    main_thread.push_args(foo[1], 'bar', foo[2], 'bar2', foo[3], 'bar3', foo[4], 'bar4',
                          foo[5], 'bar5')
    main_thread.append('SET')
    main_thread.append('SET')
    main_thread.append('SET')
    main_thread.append('SET')
    main_thread.append('SET')
    test_util.blocking_commit(main_thread)
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

    main_thread.push_args(foo[2])
    main_thread.append('CLEAR_DATABASE')
    main_thread.append('WAIT_FUTURE')
    main_thread.push_args(self.workspace.key(), 0, 0, -1)
    main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
    self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[3], 'bar3', foo[4], 'bar4', foo[5], 'bar5')))

    main_thread.push_args(foo[3], foo[5])
    main_thread.append('CLEAR_RANGE_DATABASE')
    main_thread.append('WAIT_FUTURE')
    main_thread.push_args(self.workspace.key(), 0, 0, -1)
    main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
    self.add_result(main_thread, args, fdb.tuple.pack((foo[1], 'bar', foo[5], 'bar5')))

    main_thread.push_args(self.workspace.key())
    main_thread.append('CLEAR_RANGE_STARTS_WITH_DATABASE')
    main_thread.append('WAIT_FUTURE')
    main_thread.push_args(self.workspace.key(), 0, 0, -1)
    main_thread.append('GET_RANGE_STARTS_WITH_DATABASE')
    self.add_result(main_thread, args, '')
    test_util.blocking_commit(main_thread)
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')

    # --- invalid range arguments must produce error 2210 ---
    main_thread.append('NEW_TRANSACTION')
    main_thread.push_args(foo[1], foo[5], 0, 0, 0)
    main_thread.append('GET_RANGE')
    self.add_result(main_thread, args, test_util.error_string(2210))
    main_thread.push_args(foo[1], foo[5], 0, 0, 0)
    main_thread.append('GET_RANGE_DATABASE')
    self.add_result(main_thread, args, test_util.error_string(2210))

    self.append_range_test(main_thread, args, 100, 256)
    self.append_range_test(main_thread, args, 1000, 8)

    # --- tuple layer: pack/unpack/range round trips ---
    main_thread.append('EMPTY_STACK')
    tup = (0, 'foo', -1093, u'unicode\u9348test', 0xffffffff + 100, 'bar\x00\xff')
    main_thread.push_args(*test_util.with_length(tup))
    main_thread.append('TUPLE_PACK')
    main_thread.append('DUP')
    self.add_result(main_thread, args, fdb.tuple.pack(tup))
    main_thread.append('TUPLE_UNPACK')
    for item in reversed(tup):
        self.add_result(main_thread, args, fdb.tuple.pack((item, )))

    main_thread.push_args(0xffffffff, -100)
    main_thread.append('SUB')
    main_thread.push_args(1)
    main_thread.append('TUPLE_PACK')
    self.add_result(main_thread, args, fdb.tuple.pack((0xffffffff + 100, )))

    main_thread.append('EMPTY_STACK')
    main_thread.push_args(*test_util.with_length(tup))
    main_thread.append('TUPLE_RANGE')
    rng = fdb.tuple.range(tup)
    self.add_result(main_thread, args, rng.stop)
    self.add_result(main_thread, args, rng.start)

    # --- versionstamped keys/values ---
    stampKey = 'stampedXXXXXXXXXXsuffix'
    stampKeyIndex = stampKey.find('XXXXXXXXXX')
    main_thread.push_args(u'SET_VERSIONSTAMPED_KEY', self.versionstamp_key(stampKey, stampKeyIndex), 'stampedBar')
    main_thread.append('ATOMIC_OP')
    main_thread.push_args(u'SET_VERSIONSTAMPED_VALUE', 'stampedValue', self.versionstamp_value('XXXXXXXXXX'))
    main_thread.append('ATOMIC_OP')

    # API 520+ supports an explicit offset for versionstamped values.
    if self.api_version >= 520:
        stampValue = 'stampedXXXXXXXXXXsuffix'
        stampValueIndex = stampValue.find('XXXXXXXXXX')
        main_thread.push_args(u'SET_VERSIONSTAMPED_VALUE', 'stampedValue2', self.versionstamp_value(stampValue, stampValueIndex))
        main_thread.append('ATOMIC_OP')

    main_thread.push_args('suffix')
    main_thread.append('GET_VERSIONSTAMP')
    test_util.blocking_commit(main_thread)
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
    # Rebuild the stamped key from the returned versionstamp and read it back.
    main_thread.push_args('stamped')
    main_thread.append('CONCAT')
    main_thread.append('CONCAT')
    main_thread.append('GET')
    self.add_result(main_thread, args, 'stampedBar')

    main_thread.push_args('stampedValue', 'suffix')
    main_thread.append('GET')
    main_thread.push_args('stamped')
    main_thread.append('CONCAT')
    main_thread.append('CONCAT')
    main_thread.append('GET')
    self.add_result(main_thread, args, 'stampedBar')

    if self.api_version >= 520:
        main_thread.push_args('stampedValue2')
        main_thread.append('GET')
        main_thread.append('GET')
        self.add_result(main_thread, args, 'stampedBar')

    # GET_VERSIONSTAMP outside a commit must fail with 2021.
    main_thread.append('GET_VERSIONSTAMP')
    test_util.blocking_commit(main_thread)
    self.add_result(main_thread, args, 'RESULT_NOT_PRESENT')
    self.add_result(main_thread, args, test_util.error_string(2021))

    main_thread.push_args('sentinel')
    main_thread.append('UNIT_TESTS')
    self.add_result(main_thread, args, 'sentinel')

    # --- cross-thread coordination via wait keys ---
    if not args.no_threads:
        wait_key = 'waitKey'
        # threads = [self.thread_subspace[i] for i in range(0, 2)]
        threads = ['thread_spec%d' % i for i in range(0, 2)]
        for thread_spec in threads:
            # Each spawned thread blocks until its wait key is cleared.
            main_thread.push_args(self.workspace.pack((wait_key, thread_spec)), '')
            main_thread.append('SET_DATABASE')
            main_thread.append('WAIT_FUTURE')

        for thread_spec in threads:
            main_thread.push_args(thread_spec)
            # if len(main_thread) < args.num_ops:
            main_thread.append('START_THREAD')
            thread = test_instructions.create_thread(fdb.Subspace((thread_spec, )))
            thread.append('NEW_TRANSACTION')
            thread.push_args(foo[1], foo[1], 'bar%s' % thread_spec,
                             self.workspace.pack((wait_key, thread_spec)),
                             self.workspace.pack((wait_key, thread_spec)))
            thread.append('GET')
            thread.append('POP')
            thread.append('SET')
            thread.append('CLEAR')
            test_util.blocking_commit(thread)
            thread.append('POP')
            thread.append('CLEAR_DATABASE')
            thread.push_args(self.workspace.pack((wait_key, )))
            thread.append('WAIT_EMPTY')

            thread.append('NEW_TRANSACTION')
            thread.push_args(foo[1])
            thread.append('GET')
            # Either thread may win the race; both outcomes are acceptable.
            self.add_result(thread, args, 'barthread_spec0', 'barthread_spec1')

    main_thread.append('EMPTY_STACK')
    # if len(main_thread) > args.num_ops:
    #     main_thread[args.num_ops:] = []

    return test_instructions
def __init__(self, name):
    """Bind queue *name* to its registry entry and consumer-lock subspace."""
    self._name = name
    self.available_queue = fdb.Subspace(('available_queues', name))
    self.consumer_lock = fdb.Subspace(('consumer_lock', name))
def create(tr):
    # NOTE(review): closure — `self`, `name`, `num_blocks`, and BLOCK_SIZE
    # come from an enclosing scope not visible in this chunk; presumably an
    # inner transactional helper of a device-creation method.
    # Register the device name in the device index.
    tr.set(self._index[name], '')
    device = fdb.Subspace(('dev', name))
    # Geometry values are stored as decimal strings under the 'meta' subspace.
    tr.set(device['meta']['block_size'], str(BLOCK_SIZE))
    tr.set(device['meta']['num_blocks'], str(num_blocks))
def __init__(self, db):
    # Database handle used by subsequent operations.
    self._db = db
    # Index of known device names, kept under the ('devices',) keyspace.
    self._index = fdb.Subspace(('devices',))
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os

import fdb

fdb.api_version(300)

# Priority queue layout: ('P', priority, count, random_suffix) -> value.
pq = fdb.Subspace(('P', ))


@fdb.transactional
def push(tr, value, priority):
    # Store value under the next sequence number for this priority; the
    # random suffix keeps concurrent pushes at the same count from colliding.
    tr[pq[priority][_next_count(tr, priority)][os.urandom(20)]] = value


@fdb.transactional
def _next_count(tr, priority):
    # Next per-priority sequence number (highest existing count + 1).
    r = pq[priority].range()
    # Snapshot read so concurrent pushers don't conflict on the max-count key.
    for key, value in tr.snapshot.get_range(r.start, r.stop, limit=1, reverse=True):
        return pq[priority].unpack(key)[0] + 1
    # BUG FIX: previously fell through returning None when the priority was
    # empty, so the first push stored a None count and the second push at the
    # same priority crashed on None + 1.  The canonical recipe returns 0 here.
    return 0
def __init__(self):
    # Helper object wrapping raw database operations.
    self.db_ops = DBOperations()
    # Registry of queues and per-queue consumer locks — presumably keyed by
    # queue name (matches the ('available_queues', name) usage elsewhere).
    self.available_queues_subspace = fdb.Subspace(('available_queues', ))
    self.consumer_lock_subspace = fdb.Subspace(('consumer_lock', ))
import fdb
import gateaux

fdb.api_version(510)


class TemperatureReading(gateaux.Structure):
    # Schema: key = (year, day), value = (degrees,).
    key = (gateaux.IntegerField(name='year', ), gateaux.IntegerField(name='day', ))
    value = (gateaux.IntegerField(name='degrees', ), )


db = fdb.open()
temp_reading_space = fdb.Subspace(('temp_readings', ))
temp_reading = TemperatureReading(temp_reading_space)


@fdb.transactional
def clear_space(tr):
    # Clean up
    del tr[temp_reading_space.range()]


@fdb.transactional
def set_temp(tr, year, day, degrees):
    # Store one reading keyed by (year, day).
    key = temp_reading.pack_key((year, day))
    value = temp_reading.pack_value((degrees, ))
    tr[key] = value
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import fdb fdb.api_version(300) db = fdb.open() graph = fdb.Subspace(('G', )) edge = graph['E'] inverse = graph['I'] @fdb.transactional def set_edge(tr, node, neighbor): tr[edge[node][neighbor]] = '' tr[inverse[neighbor][node]] = '' @fdb.transactional def del_edge(tr, node, neighbor): del tr[edge[node][neighbor]] del tr[inverse[neighbor][node]]