def test_blob():
    import time
    db = fdb.open()
    location = directory.create_or_open(db, ('tests', 'blob'))
    b = Blob(location)
    print "deleting old"
    b.delete(db)
    print "writing"
    b.append(db, 'asdf')
    b.append(db, 'jkl;')
    b.append(db, 'foo')
    b.append(db, 'bar')
    print_blob(db, b)
    big_data = 1000000
    print "writing lots of data"
    for i in range(50):
        print ".",
        b.append(db, '.' * 100000)
    print ""
    print "reading section of large blob..."
    t = time.time()
    s = len(b.read(db, 1234567, big_data))
    assert s == big_data
    print "got big section of blob"
def __init__(self, args):
    self.args = copy.copy(args)

    self.db = fdb.open(self.args.cluster_file)
    self.test_seed = random.randint(0, 0xffffffff)

    self.testers = [Tester.get_test(self.args.test1)]
    if self.args.test2 is not None:
        self.testers.append(Tester.get_test(self.args.test2))

    self.test = Test.create_test(self.args.test_name, fdb.Subspace((self.args.output_subspace,)))
    if self.test is None:
        raise Exception('the test \'%s\' could not be found' % self.args.test_name)

    min_api_version = max([tester.min_api_version for tester in self.testers])
    max_api_version = min([tester.max_api_version for tester in self.testers])
    self.args.api_version = choose_api_version(self.args.api_version, min_api_version, max_api_version,
                                               self.test.min_api_version, self.test.max_api_version)

    util.get_logger().info('\nCreating test at API version %d' % self.args.api_version)

    max_int_bits = min([tester.max_int_bits for tester in self.testers])
    if self.args.max_int_bits is None:
        self.args.max_int_bits = max_int_bits
    elif self.args.max_int_bits > max_int_bits:
        raise Exception('The specified testers support at most %d-bit ints, but --max-int-bits was set to %d'
                        % (max_int_bits, self.args.max_int_bits))

    self.args.no_threads = self.args.no_threads or any([not tester.threads_enabled for tester in self.testers])
    if self.args.no_threads and self.args.concurrency > 1:
        raise Exception('Not all testers support concurrency')

    # Test types should be the intersection of all tester-supported types
    self.args.types = reduce(lambda t1, t2: filter(t1.__contains__, t2),
                             map(lambda tester: tester.types, self.testers))
def fdb_connection():
    import socket
    # Point the cluster file at the container resolved via DNS, then connect.
    with open('/etc/foundationdb/fdb.cluster', 'w') as f:
        f.write('docker:docker@{}:4500'.format(socket.gethostbyname('foundationdb')))
    yield fdb.open()
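The yield makes this read as a pytest fixture. A minimal usage sketch, assuming the function is registered with @pytest.fixture (the decorator and the test below are hypothetical, not from the original):

def test_roundtrip(fdb_connection):
    # fdb_connection is the fdb.Database yielded by the fixture
    fdb_connection[b'hello'] = b'world'
    assert fdb_connection[b'hello'] == b'world'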
def load_data(keys, batch_size, value_size):
    batch_count = int(keys / batch_size)
    db = fdb.open()
    for batch in range(1, batch_count + 1):
        print("Writing batch %d" % batch)
        write_batch(db, batch_size, value_size)
def __init__(self, subspace):
    fdb.api_version(100)
    # self._db = fdb.open('/home/gruppe5/fdbconf/fdb.cluster')
    self._db = fdb.open()
    self._directory = directory.create_or_open(self._db, ('twitter',))
    if subspace is not None:
        self._subspace = self._directory[subspace]
def open():
    db = fdb.open()
    # XXX: clear database
    db.clear_range(b"", b"\xff")
    return db
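A hedged usage sketch: because open() wipes the entire keyspace, it is only safe against throwaway test clusters (the key below is arbitrary):

db = open()
db[b'counter'] = b'0'  # fresh database, so no leftover state
assert db[b'counter'] == b'0'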
def main():
    db = fdb.open()
    db.options.set_transaction_timeout(10000)
    while True:
        lines = []
        dt = datetime.now()
        status = {}
        try:
            status = json.loads(db[b'\xff\xff/status/json'])
        except fdb.FDBError as err:
            print("ERROR: Could not get fdb metrics: %s" % str(err.description, 'utf-8'))
            sleep(5)
            continue
        # Appending nine zeros converts the second-resolution timestamp to nanoseconds.
        timestamp = str(int(dt.timestamp())) + 9 * '0'
        lines += generate_tsfdb_operations_metrics(db, status, timestamp)
        lines += generate_tsfdb_processes_metrics(status, timestamp)
        lines += generate_tsfdb_qos_metrics(status, timestamp)
        lines += generate_tsfdb_queues_metrics(db, timestamp)
        lines += generate_tsfdb_cluster_data_metrics(status, timestamp)
        print("\n".join(lines))
        requests.post("%s/v1/datapoints" % TSFDB_URI,
                      data="\n".join(lines),
                      headers={'x-org-id': 'tsfdb'})
        sleep(5)
def test_read_process(process_cnt, thread_cnt, max_rows):
    # TODO: honor process_cnt by spawning that many processes.
    p = multiprocessing.Process(target=test_read,
                                args=(fdb.open(event_model='gevent'), max_rows))
    p.start()
    p.join()
def construct_database(self, instance):
    if self._db is not None:
        return self._db

    # TLS options. Each option has its own setter, so there is no way to
    # handle them generically without ugly code.
    if 'tls_certificate_file' in instance:
        fdb.options.set_tls_cert_path(instance.get('tls_certificate_file'))
    if 'tls_key_file' in instance:
        fdb.options.set_tls_key_path(instance.get('tls_key_file'))
    if 'tls_verify_peers' in instance:
        fdb.options.set_tls_verify_peers(instance.get('tls_verify_peers').encode('latin-1'))

    if 'cluster_file' in instance:
        self._db = fdb.open(cluster_file=instance.get('cluster_file'))
    else:
        self._db = fdb.open()
    return self._db
def __init__(self, thread_cnt, table, offset, increment, max_rows):
    self.db = fdb.open()
    self.thread_cnt = thread_cnt
    self.table = table
    self.offset = offset
    self.increment = increment
    self.max_rows = max_rows
    self.accu = []
    super(FdbProcess, self).__init__()
def _setup(self):
    self.db = fdb.open()
    with self._begin(write=True) as tr:
        self._files = fdb.directory.create_or_open(tr, u'fs')
        self._history = fdb.directory.create_or_open(tr, u'hist')
        self._kv = fdb.directory.create_or_open(tr, u'kv')
        self._repos = fdb.directory.create_or_open(tr, u'repo')
        self._perms = fdb.directory.create_or_open(tr, u'perms')
    self._active_repos = {}
def add_batch_process(lim, bsize, dsize, rand_size):
    db = fdb.open()
    i = 0
    # With no limit, i is never incremented, so the loop runs forever.
    while i < (lim if lim else 1):
        try:
            add(db, bsize, dsize, rand_size)
        except Exception:
            logging.exception('')
        if lim:
            i = i + 1
def main(cluster_file):
    try:
        fdb.api_version(520)
        db = fdb.open(cluster_file)

        # Get the FDB cluster status in JSON format
        results = db.get("\xff\xff/status/json")
        fdb_status = json.loads(results)

        coordinator_health = fdb_status.get('client', {}).get('coordinators', {})
        quorum_reachable = coordinator_health.get('quorum_reachable', False)
        coordinators = coordinator_health.get('coordinators', [])
        reachable = 0
        total_coordinators = len(coordinators)
        for coordinator in coordinators:
            reachable += 1 if coordinator.get('reachable', False) else 0

        processes = fdb_status.get('cluster', {}).pop('processes', {})
        friendly_processes = []
        for proc in processes.keys():
            friendly_process = processes[proc]
            friendly_process['process'] = proc
            friendly_process['messages'] = len(friendly_process.get('messages', []))
            friendly_processes.append(friendly_process)
        fdb_status['cluster']['processes'] = friendly_processes

        machines = fdb_status.get('cluster', {}).pop('machines', {})
        friendly_machines = []
        for machine in machines.keys():
            friendly_machine = machines[machine]
            friendly_machines.append(friendly_machine)
        fdb_status['cluster']['machines'] = friendly_machines

        # Replacement slug for coordinators under client
        telemetry_friendly = {
            'total': total_coordinators,
            'reachable': reachable,
            'quorum_reachable': 1 if quorum_reachable else 0,
            'coordinators': coordinators,
        }
        fdb_status['client']['coordinators'] = telemetry_friendly

        global_tags['storage_engine'] = fdb_status.get('cluster', {}).get('configuration', {}).get('storage_engine')
    except Exception as ex:
        handle_error(str(ex))

    json_to_influxdb(fdb_status)
def __init__(self, conn_url='/usr/local/etc/foundationdb/fdb.cluster', dbname='skunkqueue'):
    fdb.api_version(200)
    self.conn = fdb.open(conn_url)
    self.skunkdb = fdb.directory.create_or_open(self.conn, (dbname,))
    self.worker_space = self.skunkdb['worker']
    self.result_space = self.skunkdb['result']
    self.job_queues = {}
def stringintern_example():
    db = fdb.open()
    location = fdb.directory.create_or_open(db, ('tests', 'stringintern'))
    strs = StringIntern(location)
    db["0"] = strs.intern(db, "testing 123456789")
    db["1"] = strs.intern(db, "dog")
    db["2"] = strs.intern(db, "testing 123456789")
    db["3"] = strs.intern(db, "cat")
    db["4"] = strs.intern(db, "cat")
    for k, v in db['0':'9']:
        print k, '=', strs.lookup(db, v)
def simpledoc_example():
    db = fdb.open()

    print "Insert initial data"
    set_sample_data(db)
    print_simpledoc(db)

    print "Query data"
    print "Find all dogs:", find_all_dogs(db)
    print "Find pets of alice:"
    for p in pets_of_owner(db, 'alice'):
        print "    ", p

    print "Modify and query data"
    set_vacation_status(db, 'bob', 'bermuda')
    print "Pets with owners on vacation: ", pets_on_vacation(db)
def run_test(self):
    try:
        db = fdb.open(None, "DB")
    except KeyboardInterrupt:
        raise
    except Exception:
        self.result.add_error(self.get_error("fdb.open failed"))
        return

    try:
        self.test_performance(db)
    except KeyboardInterrupt:
        raise
    except Exception:
        self.result.add_error(self.get_error("Failed to complete all tests"))
def smoke_test():
    db = fdb.open()
    working_dir = fdb.directory.create_or_open(db, (u'working',))
    workspace = Workspace(working_dir, db)
    current = workspace.current
    clear_subspace(db, current)
    db[current[1]] = 'a'
    db[current[2]] = 'b'
    print "contents:"
    print_subspace(db, current)
    with workspace as newspace:
        clear_subspace(db, newspace)
        db[newspace[3]] = 'c'
        db[newspace[4]] = 'd'
    print "contents:"
    print_subspace(db, workspace.current)
def run_test(self):
    try:
        db = fdb.open(None, 'DB')
    except KeyboardInterrupt:
        raise
    except:
        self.result.add_error(self.get_error('fdb.open failed'))
        return

    try:
        self.test_performance(db)
    except KeyboardInterrupt:
        raise
    except:
        self.result.add_error(self.get_error('Failed to complete all tests'))
class FdbCollector(object):
    db = fdb.open()

    def collect(self):
        data = json.loads(read_metrics(self.db).decode())
        # https://github.com/apple/foundationdb/issues/398
        yield GaugeMetricFamily(
            'fdb_workload_operations_reads_per_second',
            'Read operations per second',
            value=data['cluster']['workload']['operations']['reads']['hz'])
        yield GaugeMetricFamily(
            'fdb_workload_operations_writes_per_second',
            'Write operations per second',
            value=data['cluster']['workload']['operations']['writes']['hz'])
        yield CounterMetricFamily(
            'fdb_workload_operations_writes_total',
            'Total number of write operations',
            value=data['cluster']['workload']['operations']['writes']['counter'])
        yield CounterMetricFamily(
            'fdb_workload_transactions_committed_total',
            'Total number of committed transactions',
            value=data['cluster']['workload']['transactions']['committed']['counter'])
        yield CounterMetricFamily(
            'fdb_workload_transactions_conflicted_total',
            'Total number of transaction conflicts',
            value=data['cluster']['workload']['transactions']['conflicted']['counter'])
        yield CounterMetricFamily(
            'fdb_workload_transactions_started_total',
            'Total number of started transactions',
            value=data['cluster']['workload']['transactions']['started']['counter'])
        yield CounterMetricFamily(
            'fdb_latency_read_seconds',
            'Time to perform a single read',
            value=data['cluster']['latency_probe']['read_seconds'])
        yield CounterMetricFamily(
            'fdb_latency_commit_seconds',
            'Time to commit a sample transaction',
            value=data['cluster']['latency_probe']['commit_seconds'])
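To actually serve these metrics, the collector must be registered with prometheus_client; a minimal sketch (the port and sleep interval are arbitrary choices, not from the original):

import time
from prometheus_client import start_http_server
from prometheus_client.core import REGISTRY

REGISTRY.register(FdbCollector())
start_http_server(8000)  # exposes /metrics on :8000
while True:
    time.sleep(60)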
def stringintern_example():
    db = fdb.open()
    location = directory.create_or_open(db, ('tests', 'stringintern'))
    strs = StringIntern(location)

    # Run all the inserts atomically in one transaction
    @fdb.transactional
    def test_insert(tr):
        tr["0"] = strs.intern(tr, "testing 123456789")
        tr["1"] = strs.intern(tr, "dog")
        tr["2"] = strs.intern(tr, "testing 123456789")
        tr["3"] = strs.intern(tr, "cat")
        tr["4"] = strs.intern(tr, "cat")

    test_insert(db)

    tr = db.create_transaction()
    for k, v in tr['0':'9']:
        print k, '=', strs.lookup(tr, v)
def main():
    fdb.api_version(510)
    db = fdb.open()
    stores = Stores(db)
    # del db[:]
    stores.create('foobar', 1000000)
    stores.create('example', 1000000)
    for name in stores.list():
        print 'store %s\n  nbd-client -N %s 127.0.0.1 /dev/nbd0' % (name, name)
    server = Server(('127.0.0.1', 10809), stores)
    gevent.signal(signal.SIGTERM, server.stop)
    gevent.signal(signal.SIGINT, server.stop)
    server.serve_forever()
def main():
    fdb.api_version(510)
    db = fdb.open()
    stores = Stores(db)
    # del db[:]
    stores.create('foobar', 1000000)
    stores.create('example', 1000000)
    for name in stores.list():
        print('store %s\n  nbd-client -N %s 127.0.0.1 /dev/nbd0' % (name, name))
    server = Server(('127.0.0.1', 10809), stores)
    gevent.signal(signal.SIGTERM, server.stop)
    gevent.signal(signal.SIGINT, server.stop)
    server.serve_forever()
def spatial_example():
    db = fdb.open()
    index_location = directory.create_or_open(db, ('tests', 'spatial'))
    s = SpatialIndex(index_location)
    s.clear(db)
    print "point d is at", s.get_location(db, 'd')
    s.set_location(db, 'a', (3, 2))
    s.set_location(db, 'b', (1, 4))
    s.set_location(db, 'c', (5, 3))
    s.set_location(db, 'd', (2, 3))
    s.set_location(db, 'e', (0, 0))
    print "point d is at", s.get_location(db, 'd')
    print "Searching in rectangle (1,1) -> (5,5):"
    print s.get_in_rectangle(db, Rect((1, 1), (5, 5)))
def run_test(self):
    try:
        db = fdb.open(None, "DB")
    except KeyboardInterrupt:
        raise
    except Exception:
        self.result.add_error(self.get_error("fdb.open failed"))
        return

    try:
        print("Testing functions...")
        self.test_functions(db)
        print("Testing correctness...")
        del db[:]
        self.test_correctness(db)
    except KeyboardInterrupt:
        raise
    except Exception:
        self.result.add_error(self.get_error("Failed to complete all tests"))
def run_test(self):
    try:
        db = fdb.open(None, 'DB')
    except KeyboardInterrupt:
        raise
    except:
        self.result.add_error(self.getError('fdb.open failed'))
        return

    try:
        print('Testing functions...')
        self.testFunctions(db)
        print('Testing correctness...')
        del db[:]
        self.testCorrectness(db)
    except KeyboardInterrupt:
        raise
    except:
        self.result.add_error(self.getError('Failed to complete all tests'))
def open(cluster_file=None, dir_path=("joshua",)):
    global db, dir_top, dir_ensembles, dir_active, dir_sanity, dir_all_ensembles, dir_ensemble_data, dir_ensemble_results
    global dir_ensemble_results_pass, dir_ensemble_results_fail, dir_ensemble_incomplete, dir_ensemble_results_large
    global dir_active_changes, dir_sanity_changes, dir_failures

    db = fdb.open(cluster_file)
    dir_top = create_or_open_top_path(db, dir_path)
    dir_ensembles = dir_top.create_or_open(db, "ensembles")
    dir_active = dir_ensembles.create_or_open(db, "active")
    dir_sanity = dir_ensembles.create_or_open(db, "sanity")
    dir_all_ensembles = dir_ensembles.create_or_open(db, "all")
    dir_ensemble_data = dir_ensembles.create_or_open(db, "data")
    dir_ensemble_incomplete = dir_ensembles.create_or_open(db, "incomplete")
    dir_ensemble_results = dir_ensembles.create_or_open(db, "results")
    dir_ensemble_results_pass = dir_ensemble_results.create_or_open(db, "pass")
    dir_ensemble_results_fail = dir_ensemble_results.create_or_open(db, "fail")
    dir_ensemble_results_large = dir_ensemble_results.create_or_open(db, "large")
    dir_failures = dir_top.create_or_open(db, "failures")

    dir_active_changes = dir_active
    dir_sanity_changes = dir_sanity
def run(clusterFile):
    db = fdb.open(clusterFile)
    db.options.set_transaction_timeout(2000)  # 2 seconds
    db.options.set_transaction_retry_limit(3)

    value = 'a' * 1024
    setValue(db, 't1', value)
    assert value == db['t1']

    try:
        db.options.set_transaction_size_limit(1000)
        setValue(db, 't2', value)
        assert False  # not reached
    except fdb.impl.FDBError as e:
        assert e.code == 2101  # Transaction exceeds byte limit (2101)

    # Per-transaction option overrides database option
    db.options.set_transaction_size_limit(1000000)
    try:
        setValueWithLimit(db, 't3', value, 1000)
        assert False  # not reached
    except fdb.impl.FDBError as e:
        assert e.code == 2101  # Transaction exceeds byte limit (2101)
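The setValue and setValueWithLimit helpers are not shown above. A minimal sketch of what they plausibly look like, assuming the standard per-transaction size-limit option:

@fdb.transactional
def setValue(tr, key, value):
    tr[key] = value

@fdb.transactional
def setValueWithLimit(tr, key, value, limit):
    tr.options.set_size_limit(limit)  # overrides the database-wide limit for this transaction
    tr[key] = value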
        assert False
    except fdb.FDBError as e:
        assert e.code == 2131  # tenant not found

    del tenant2[:]
    db.delete_tenant(b'tenant2')

    assert db[prefix1 + b'tenant_test_key'] == None
    assert db[prefix2 + b'tenant_test_key'] == None
    assert db[b'tenant_test_key'] == b'no_tenant'

    del db[b'tenant_test_key']
    assert db[b'tenant_test_key'] == None


def test_tenants(db):
    test_tenant_tuple_name(db)
    test_tenant_operations(db)


# Expect a cluster file as input. This test will write to the FDB cluster, so
# be aware of potential side effects.
if __name__ == '__main__':
    clusterFile = sys.argv[1]
    db = fdb.open(clusterFile)
    db.options.set_transaction_timeout(2000)  # 2 seconds
    db.options.set_transaction_retry_limit(3)
    test_tenants(db)
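For context, a hedged sketch of how a tenant handle such as tenant2 is obtained (db.open_tenant is the FoundationDB 7.1+ Python API; the value below is arbitrary):

tenant2 = db.open_tenant(b'tenant2')
tenant2[b'tenant_test_key'] = b'value'  # stored under the tenant's key prefix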
from directory_extension import DirectoryExtension

from cancellation_timeout_tests import test_timeouts
from cancellation_timeout_tests import test_db_timeouts
from cancellation_timeout_tests import test_cancellation
from cancellation_timeout_tests import test_retry_limits
from cancellation_timeout_tests import test_db_retry_limits
from cancellation_timeout_tests import test_combinations
from size_limit_tests import test_size_limit_option, test_get_approximate_size

random.seed(0)

if len(sys.argv) == 4:
    db = fdb.open(sys.argv[3])
else:
    db = fdb.open()


class Stack:
    def __init__(self):
        self.stack = []

    def __repr__(self):
        return repr(self.stack)

    def __str__(self):
        return str(self.stack)

    def __len__(self):
import numbers
import os
import os.path

import gevent
from gevent.queue import Queue, Empty

import blob
import fdb
import fdb.tuple
import simpledoc
from subspace import Subspace

fdb.api_version(100)
db = fdb.open(event_model="gevent")


@fdb.transactional
def clear_subspace(tr, subspace):
    tr.clear_range_startswith(subspace.key())


##############################
## Base class for the layer ##
##############################

class BulkLoader(Queue):
    '''
    Supports the use of multiple concurrent transactions for efficiency,
    with a default of 50 concurrent transactions.
    '''
    def __init__(self, number_producers=1, number_consumers=50, **kwargs):
def connect(cluster_file=None):
    db = fdb.open(cluster_file=cluster_file)
    return db
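A hedged usage sketch (the key and value are arbitrary):

db = connect()
db[b'hello'] = b'world'
print(db[b'hello'])  # b'world'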
def __init__(self, subspace):
    fdb.api_version(100)
    self._db = fdb.open()
    self._directory = directory.create_or_open(self._db, ('twitter',))
    if subspace is not None:
        self._subspace = self._directory[subspace]
    if not dry_run:
        prefix = tr[node_info.parent[directory_layer.SUBDIRS][node_info.path[-1]]]
        del tr[node_info.parent[directory_layer.SUBDIRS][node_info.path[-1]]]
        tr[node_info.parent[directory_layer.SUBDIRS][unicode_path[-1]]] = prefix


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Upgrades a directory hierarchy created with an older version of the directory layer to version %s.%s.%s' % VERSION)
    parser.add_argument('-C', dest='cluster_file', type=str, default=None,
                        help='The cluster file for the database where the directory resides. If none is specified, then the default cluster file is used.')
    parser.add_argument('-f', dest='force', action='store_true',
                        help='Attempt an upgrade even if the version of the directory is current. This is useful if modifications were made to an upgraded directory from the original directory layer.')
    parser.add_argument('--node-subspace', dest='node_subspace', type=str, default=None,
                        help='The node subspace that the directory was created with. If none is specified, then the default node subspace is used.')
    # parser.add_argument('--upgrade-partitions', dest='upgrade_partitions', action='store_true',
    #                     help='If set, then partitions found in the directory hierarchy will be opened and upgraded recursively.')

    args = parser.parse_args()

    print ''
    try:
        db = fdb.open(args.cluster_file)
        if args.node_subspace:
            upgrade_dir = directory.DirectoryLayer(node_subspace=args.node_subspace)
        else:
            upgrade_dir = directory.directory
        upgrade(db, upgrade_dir, args.force)
    except fdb.FDBError as e:
        print str(e)
    print ''
import random

from pubsub_bigdoc import PubSub

parser = argparse.ArgumentParser()
parser.add_argument("--zkAddr")
parser.add_argument("--database")
parser.add_argument("--totalUsers", type=int)
parser.add_argument("--followers", type=int)
args = parser.parse_args()

# zkAddr = '10.0.3.1:2181/bbc'
# database = 'TwitDB'
# users = 1000
# followers = 10

db = fdb.open(args.zkAddr, args.database)
ps = PubSub(db)

print 'creating subscriptions',
for i in range(0, args.followers):
    u1 = random.randint(0, args.totalUsers)
    u2 = random.randint(0, args.totalUsers)
    if u1 != u2:
        ps.create_subscription(ps.get_feed_by_name('%09d' % u1), ps.get_inbox_by_name('%09d' % u2))
    if i > 0 and i % 100 == 0:
        print i,
print 'done'

# @fdb.transactional
# def done(tr):
import numbers
import os
import os.path

import gevent
from gevent.queue import Queue, Empty

import blob
import fdb
import fdb.tuple
import simpledoc
from subspace import Subspace

fdb.api_version(200)
db = fdb.open(event_model="gevent")


@fdb.transactional
def clear_subspace(tr, subspace):
    tr.clear_range_startswith(subspace.key())


##############################
## Base class for the layer ##
##############################

class BulkLoader(Queue):
    '''
    Supports the use of multiple concurrent transactions for efficiency,
    with a default of 50 concurrent transactions.
    '''
    def __init__(self, number_producers=1, number_consumers=50, **kwargs):
def move(self, db_or_tr, new_path):
    return self.directoryLayer.move(db_or_tr, self.path, new_path)

def remove(self, db_or_tr):
    return self.directoryLayer.remove(db_or_tr, self.path)

def list(self, db_or_tr):
    return self.directoryLayer.list(db_or_tr, self.path)


def strinc(key):
    lastc = (ord(key[-1:]) + 1) % 256
    if lastc:
        return key[:-1] + chr(lastc)
    else:
        return strinc(key[:-1]) + chr(lastc)


if __name__ == "__main__":
    # If module is run as a script, print the directory tree. This code will
    # not work well if there are huge numbers of directories!
    @fdb.transactional
    def printdirs(tr, root, indent=""):
        for name in root.list(tr):
            child = root.open(tr, name)
            print indent + name, child.layer or ""
            printdirs(tr, child, indent + "  ")

    db = fdb.open()
    printdirs(db, directory)
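A usage sketch for strinc: since strinc(prefix) is the first key ordered after every key that starts with prefix, the range [prefix, strinc(prefix)) scans a whole prefix (the 'user/' prefix is an arbitrary example):

for k, v in db['user/':strinc('user/')]:
    print k, '=', v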
import fdb
import gateaux

fdb.api_version(510)


class TemperatureReading(gateaux.Structure):
    key = (
        gateaux.IntegerField(name='year'),
        gateaux.IntegerField(name='day'),
    )
    value = (
        gateaux.IntegerField(name='degrees'),
    )


db = fdb.open()
temp_reading_space = fdb.Subspace(('temp_readings',))
temp_reading = TemperatureReading(temp_reading_space)


@fdb.transactional
def clear_space(tr):
    # Clean up
    del tr[temp_reading_space.range()]


@fdb.transactional
def set_temp(tr, year, day, degrees):
    key = temp_reading.pack_key((year, day))
    value = temp_reading.pack_value((degrees,))
    tr[key] = value
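A hedged read counterpart to set_temp, assuming gateaux's unpack_value mirrors pack_value (get_temp itself is hypothetical, not part of the original):

@fdb.transactional
def get_temp(tr, year, day):
    key = temp_reading.pack_key((year, day))
    packed = tr[key]
    if packed == None:  # missing key
        return None
    return temp_reading.unpack_value(packed)[0]  # degrees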
def connect(self):
    fdb.api_version(100)
    self.__db = fdb.open(self._cluster, self._db, self._event_model)