def __init__(self, subspace):
    fdb.api_version(100)
    # self._db = fdb.open('/home/gruppe5/fdbconf/fdb.cluster')
    self._db = fdb.open()
    self._directory = directory.create_or_open(self._db, ('twitter',))
    if subspace is not None:
        self._subspace = self._directory[subspace]
def main(cluster_file):
    try:
        fdb.api_version(520)
        db = fdb.open(cluster_file)
        # Get the FDB cluster status in JSON format (keys are bytes)
        results = db.get(b'\xff\xff/status/json')
        fdb_status = json.loads(results)

        coordinator_health = fdb_status.get('client', {}).get('coordinators', {})
        quorum_reachable = coordinator_health.get('quorum_reachable', False)
        coordinators = coordinator_health.get('coordinators', [])
        reachable = 0
        total_coordinators = len(coordinators)
        for coordinator in coordinators:
            reachable += 1 if coordinator.get('reachable', False) else 0

        processes = fdb_status.get('cluster', {}).pop('processes', {})
        friendly_processes = []
        for proc in processes.keys():
            friendly_process = processes[proc]
            friendly_process['process'] = proc
            friendly_process['messages'] = len(friendly_process.get('messages', []))
            friendly_processes.append(friendly_process)
        fdb_status['cluster']['processes'] = friendly_processes

        machines = fdb_status.get('cluster', {}).pop('machines', {})
        friendly_machines = []
        for machine in machines.keys():
            friendly_machine = machines[machine]
            friendly_machines.append(friendly_machine)
        fdb_status['cluster']['machines'] = friendly_machines

        # Replacement slug for coordinators under client
        telemetry_friendly = {
            'total': total_coordinators,
            'reachable': reachable,
            'quorum_reachable': 1 if quorum_reachable else 0,
            'coordinators': coordinators,
        }
        fdb_status['client']['coordinators'] = telemetry_friendly

        global_tags['storage_engine'] = fdb_status.get('cluster', {}).get(
            'configuration', {}).get('storage_engine')
    except Exception as ex:
        handle_error(str(ex))
    json_to_influxdb(fdb_status)
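# A quick self-contained sanity check (not from the original) showing how the
# coordinator summary above reduces a minimal, made-up status document to the
# same totals that get reported:
sample = {
    'client': {'coordinators': {
        'quorum_reachable': True,
        'coordinators': [{'reachable': True}, {'reachable': False}],
    }},
}
health = sample['client']['coordinators']
summary = {
    'total': len(health['coordinators']),
    'reachable': sum(1 for c in health['coordinators'] if c.get('reachable', False)),
    'quorum_reachable': 1 if health['quorum_reachable'] else 0,
}
assert summary == {'total': 2, 'reachable': 1, 'quorum_reachable': 1}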
def __init__(self, conn_url='/usr/local/etc/foundationdb/fdb.cluster', dbname='skunkqueue'):
    fdb.api_version(200)
    self.conn = fdb.open(conn_url)
    self.skunkdb = fdb.directory.create_or_open(self.conn, (dbname,))
    self.worker_space = self.skunkdb['worker']
    self.result_space = self.skunkdb['result']
    self.job_queues = {}
def main():
    fdb.api_version(510)
    db = fdb.open()
    stores = Stores(db)
    # del db[:]
    stores.create('foobar', 1000000)
    stores.create('example', 1000000)
    for name in stores.list():
        print('store %s\n nbd-client -N %s 127.0.0.1 /dev/nbd0' % (name, name))
    server = Server(('127.0.0.1', 10809), stores)
    gevent.signal(signal.SIGTERM, server.stop)
    gevent.signal(signal.SIGINT, server.stop)
    server.serve_forever()
import glob
import json
import numbers
import os
import os.path

import gevent
from gevent.queue import Queue, Empty

import blob
import fdb
import fdb.tuple
import simpledoc
from subspace import Subspace

fdb.api_version(200)
db = fdb.open(event_model="gevent")


@fdb.transactional
def clear_subspace(tr, subspace):
    tr.clear_range_startswith(subspace.key())


##############################
## Base class for the layer ##
##############################

class BulkLoader(Queue):
    '''
    Supports the use of multiple concurrent transactions for efficiency,
    with a default of 50 concurrent transactions.
    '''
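# The docstring above promises a default of 50 concurrent transactions. Below
# is a minimal sketch of that gevent pattern (an illustration only, not the
# actual BulkLoader implementation; _write_one and _load_concurrently are
# hypothetical names): N greenlets drain a shared queue, each committing its
# own transaction.
@fdb.transactional
def _write_one(tr, subspace, k, v):
    tr[subspace.pack((k,))] = v


def _load_concurrently(db, subspace, items, concurrency=50):
    q = Queue()
    for kv in items:
        q.put(kv)

    def worker():
        # Each greenlet pulls items until the queue stays empty for a second.
        while True:
            try:
                k, v = q.get(timeout=1)
            except Empty:
                return
            _write_one(db, subspace, k, v)

    gevent.joinall([gevent.spawn(worker) for _ in range(concurrency)])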
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import fdb
import sys
import json
from fdb.tuple import pack

if __name__ == '__main__':
    fdb.api_version(710)


def test_tenant_tuple_name(db):
    tuplename = (b'test', b'level', b'hierarchy', 3, 1.24, 'str')
    db.allocate_tenant(tuplename)
    tenant = db.open_tenant(tuplename)
    tenant[b'foo'] = b'bar'
    assert tenant[b'foo'] == b'bar'
    del tenant[b'foo']
    db.delete_tenant(tuplename)
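# For comparison, a hedged sketch of the same round trip using a byte-string
# tenant name through the fdb.tenant_management helpers from the 7.1+ bindings
# (an addition for illustration; the test above uses tuple names and the
# Database methods directly):
def test_tenant_bytes_name(db):
    name = b'test_tenant'
    fdb.tenant_management.create_tenant(db, name)
    tenant = db.open_tenant(name)
    tenant[b'foo'] = b'bar'
    assert tenant[b'foo'] == b'bar'
    del tenant[b'foo']  # a tenant must be empty before it can be deleted
    fdb.tenant_management.delete_tenant(db, name)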
        threading.Thread(target=inbox_driver, args=(inbox_map[id],))
        for id in inbox_map
    ]

    for f in feed_threads:
        f.start()
    for i in inbox_threads:
        i.start()
    for f in feed_threads:
        f.join()
    for i in inbox_threads:
        i.join()


def sample_pubsub(feeds, inboxes, messages):
    feed_map, inbox_map = setup_topology(feeds, inboxes)
    run_threads(feed_map, inbox_map, messages)


if __name__ == "__main__":
    import random
    import threading
    import time

    import fdb

    fdb.api_version(22)

    db = fdb.open()
    ps = PubSub(db)
    ps.clear_all_messages()
    sample_pubsub(3, 3, 3)
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from collections import defaultdict
from typing import Dict, Tuple, List

import fdb

fdb.api_version(520)

import fdb.tuple

from io import BytesIO
from datetime import datetime, timedelta, timezone

import hashlib
import heapq
import os
import re
import random
import socket
import struct
import time
import traceback
import xml.etree.ElementTree as ET
import zlib
import sys
import logging  # used by the logger setup below; truncated from the original header
import os
import re
import xml.etree.ElementTree as ET
import zlib
from collections import defaultdict
from io import BytesIO
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple

import fdb
import fdb.tuple

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

fdb.api_version(630)
FDBError = fdb.FDBError

ONE = b"\x01" + b"\x00" * 7
TIMESTAMP_FMT = "%Y%m%d-%H%M%S"
TIMEDELTA_REGEX1 = re.compile(
    r"(?P<days>[-\d]+) day[s]*, (?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d[\.\d+]*)"
)
TIMEDELTA_REGEX2 = re.compile(
    r"(?P<hours>\d+):(?P<minutes>\d+):(?P<seconds>\d[\.\d+]*)")

# A random instance ID as the seed for the Joshua agent
instanceid = os.urandom(8)
BLOB_KEY_LIMIT = 8192
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import fdb
import sys

if __name__ == '__main__':
    fdb.api_version(610)


@fdb.transactional
def setValue(tr, key, value):
    tr[key] = value


@fdb.transactional
def setValueWithLimit(tr, key, value, limit):
    tr.options.set_size_limit(limit)
    tr[key] = value


def test_size_limit_option(db):
    db.options.set_transaction_timeout(2000)  # 2 seconds
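# A hedged sketch of how such a size-limit test typically asserts the failure
# (check_size_limit is a hypothetical helper, since the original file is
# truncated above; error code 2101 is transaction_too_large):
def check_size_limit(db, limit):
    try:
        setValueWithLimit(db, b'key', b'a' * 1024, limit)
        assert False, 'expected the size limit to reject the write'
    except fdb.FDBError as e:
        assert e.code == 2101  # transaction_too_large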
def run(self):
    for idx, i in enumerate(self.instructions):
        op_tuple = fdb.tuple.unpack(i.value)
        op = op_tuple[0]

        # print("Stack is %r" % self.stack)
        # if op != "PUSH" and op != "SWAP":
        #     print("%d. Instruction is %s" % (idx, op))

        isDatabase = op.endswith(six.u('_DATABASE'))
        isSnapshot = op.endswith(six.u('_SNAPSHOT'))

        if isDatabase:
            op = op[:-9]
            obj = self.db
        elif isSnapshot:
            op = op[:-9]
            obj = self.current_transaction().snapshot
        else:
            obj = self.current_transaction()

        inst = Instruction(obj, self.stack, op, idx, isDatabase, isSnapshot)

        try:
            if inst.op == six.u("PUSH"):
                inst.push(op_tuple[1])
            elif inst.op == six.u("DUP"):
                inst.stack.push(*self.stack[0])
            elif inst.op == six.u("EMPTY_STACK"):
                self.stack = Stack()
            elif inst.op == six.u("SWAP"):
                idx = inst.pop()
                self.stack[0], self.stack[idx] = self.stack[idx], self.stack[0]
            elif inst.op == six.u("POP"):
                inst.pop()
            elif inst.op == six.u("SUB"):
                a, b = inst.pop(2)
                inst.push(a - b)
            elif inst.op == six.u("CONCAT"):
                a, b = inst.pop(2)
                inst.push(a + b)
            elif inst.op == six.u("WAIT_FUTURE"):
                old_idx, item = inst.pop(with_idx=True)
                inst.stack.push(old_idx, item)
            elif inst.op == six.u("NEW_TRANSACTION"):
                self.new_transaction()
            elif inst.op == six.u("USE_TRANSACTION"):
                self.switch_transaction(inst.pop())
            elif inst.op == six.u("ON_ERROR"):
                inst.push(inst.tr.on_error(inst.pop()))
            elif inst.op == six.u("GET"):
                key = inst.pop()
                num = random.randint(0, 2)
                if num == 0:
                    f = obj[key]
                elif num == 1:
                    f = obj.get(key)
                else:
                    f = obj.__getitem__(key)

                if f == None:
                    inst.push(b'RESULT_NOT_PRESENT')
                else:
                    inst.push(f)
            elif inst.op == six.u("GET_KEY"):
                key, or_equal, offset, prefix = inst.pop(4)
                result = obj.get_key(fdb.KeySelector(key, or_equal, offset))
                if result.startswith(prefix):
                    inst.push(result)
                elif result < prefix:
                    inst.push(prefix)
                else:
                    inst.push(strinc(prefix))
            elif inst.op == six.u("GET_RANGE"):
                begin, end, limit, reverse, mode = inst.pop(5)
                if limit == 0 and mode == -1 and random.random() < 0.5:
                    if reverse:
                        r = obj[begin:end:-1]
                    else:
                        r = obj[begin:end]
                else:
                    r = obj.get_range(begin, end, limit, reverse, mode)

                self.push_range(inst, r)
            elif inst.op == six.u("GET_RANGE_STARTS_WITH"):
                prefix, limit, reverse, mode = inst.pop(4)
                self.push_range(inst, obj.get_range_startswith(prefix, limit, reverse, mode))
            elif inst.op == six.u("GET_RANGE_SELECTOR"):
                begin_key, begin_or_equal, begin_offset, end_key, end_or_equal, end_offset, limit, reverse, mode, prefix = inst.pop(10)
                beginSel = fdb.KeySelector(begin_key, begin_or_equal, begin_offset)
                endSel = fdb.KeySelector(end_key, end_or_equal, end_offset)
                if limit == 0 and mode == -1 and random.random() < 0.5:
                    if reverse:
                        r = obj[beginSel:endSel:-1]
                    else:
                        r = obj[beginSel:endSel]
                else:
                    r = obj.get_range(beginSel, endSel, limit, reverse, mode)

                self.push_range(inst, r, prefix_filter=prefix)
            elif inst.op == six.u("GET_READ_VERSION"):
                self.last_version = obj.get_read_version().wait()
                inst.push(b"GOT_READ_VERSION")
            elif inst.op == six.u("SET"):
                key, value = inst.pop(2)
                if random.random() < 0.5:
                    obj[key] = value
                else:
                    obj.set(key, value)

                if obj == self.db:
                    inst.push(b"RESULT_NOT_PRESENT")
            elif inst.op == six.u("LOG_STACK"):
                prefix = inst.pop()
                entries = {}
                while len(self.stack) > 0:
                    stack_index = len(self.stack) - 1
                    entries[stack_index] = inst.pop(with_idx=True)
                    if len(entries) == 100:
                        self.log_stack(self.db, prefix, entries)
                        entries = {}

                self.log_stack(self.db, prefix, entries)
            elif inst.op == six.u("ATOMIC_OP"):
                opType, key, value = inst.pop(3)
                getattr(obj, opType.lower())(key, value)

                if obj == self.db:
                    inst.push(b"RESULT_NOT_PRESENT")
            elif inst.op == six.u("SET_READ_VERSION"):
                inst.tr.set_read_version(self.last_version)
            elif inst.op == six.u("CLEAR"):
                if random.random() < 0.5:
                    del obj[inst.pop()]
                else:
                    obj.clear(inst.pop())

                if obj == self.db:
                    inst.push(b"RESULT_NOT_PRESENT")
            elif inst.op == six.u("CLEAR_RANGE"):
                begin, end = inst.pop(2)
                num = random.randint(0, 2)
                if num == 0:
                    del obj[begin:end]
                elif num == 1:
                    obj.clear_range(begin, end)
                else:
                    obj.__delitem__(slice(begin, end))

                if obj == self.db:
                    inst.push(b"RESULT_NOT_PRESENT")
            elif inst.op == six.u("CLEAR_RANGE_STARTS_WITH"):
                obj.clear_range_startswith(inst.pop())
                if obj == self.db:
                    inst.push(b"RESULT_NOT_PRESENT")
            elif inst.op == six.u("READ_CONFLICT_RANGE"):
                inst.tr.add_read_conflict_range(inst.pop(), inst.pop())
                inst.push(b"SET_CONFLICT_RANGE")
            elif inst.op == six.u("WRITE_CONFLICT_RANGE"):
                inst.tr.add_write_conflict_range(inst.pop(), inst.pop())
                inst.push(b"SET_CONFLICT_RANGE")
            elif inst.op == six.u("READ_CONFLICT_KEY"):
                inst.tr.add_read_conflict_key(inst.pop())
                inst.push(b"SET_CONFLICT_KEY")
            elif inst.op == six.u("WRITE_CONFLICT_KEY"):
                inst.tr.add_write_conflict_key(inst.pop())
                inst.push(b"SET_CONFLICT_KEY")
            elif inst.op == six.u("DISABLE_WRITE_CONFLICT"):
                inst.tr.options.set_next_write_no_write_conflict_range()
            elif inst.op == six.u("COMMIT"):
                inst.push(inst.tr.commit())
            elif inst.op == six.u("RESET"):
                inst.tr.reset()
            elif inst.op == six.u("CANCEL"):
                inst.tr.cancel()
            elif inst.op == six.u("GET_COMMITTED_VERSION"):
                self.last_version = inst.tr.get_committed_version()
                inst.push(b"GOT_COMMITTED_VERSION")
            elif inst.op == six.u("GET_VERSIONSTAMP"):
                inst.push(inst.tr.get_versionstamp())
            elif inst.op == six.u("TUPLE_PACK"):
                count = inst.pop()
                items = inst.pop(count)
                inst.push(fdb.tuple.pack(tuple(items)))
            elif inst.op == six.u("TUPLE_PACK_WITH_VERSIONSTAMP"):
                prefix = inst.pop()
                count = inst.pop()
                items = inst.pop(count)
                if not fdb.tuple.has_incomplete_versionstamp(items) and random.random() < 0.5:
                    inst.push(b"ERROR: NONE")
                else:
                    try:
                        packed = fdb.tuple.pack_with_versionstamp(tuple(items), prefix=prefix)
                        inst.push(b"OK")
                        inst.push(packed)
                    except ValueError as e:
                        if str(e).startswith("No incomplete"):
                            inst.push(b"ERROR: NONE")
                        else:
                            inst.push(b"ERROR: MULTIPLE")
            elif inst.op == six.u("TUPLE_UNPACK"):
                for i in fdb.tuple.unpack(inst.pop()):
                    inst.push(fdb.tuple.pack((i,)))
            elif inst.op == six.u("TUPLE_SORT"):
                count = inst.pop()
                items = inst.pop(count)
                unpacked = map(fdb.tuple.unpack, items)
                if six.PY3:
                    sorted_items = sorted(unpacked, key=fdb.tuple.pack)
                else:
                    sorted_items = sorted(unpacked, cmp=fdb.tuple.compare)
                for item in sorted_items:
                    inst.push(fdb.tuple.pack(item))
            elif inst.op == six.u("TUPLE_RANGE"):
                count = inst.pop()
                items = inst.pop(count)
                r = fdb.tuple.range(tuple(items))
                inst.push(r.start)
                inst.push(r.stop)
            elif inst.op == six.u("ENCODE_FLOAT"):
                f_bytes = inst.pop()
                f = struct.unpack(">f", f_bytes)[0]
                inst.push(fdb.tuple.SingleFloat(f))
            elif inst.op == six.u("ENCODE_DOUBLE"):
                d_bytes = inst.pop()
                d = struct.unpack(">d", d_bytes)[0]
                inst.push(d)
            elif inst.op == six.u("DECODE_FLOAT"):
                f = inst.pop()
                f_bytes = struct.pack(">f", f.value)
                inst.push(f_bytes)
            elif inst.op == six.u("DECODE_DOUBLE"):
                d = inst.pop()
                d_bytes = struct.pack(">d", d)
                inst.push(d_bytes)
            elif inst.op == six.u("START_THREAD"):
                t = Tester(self.db, inst.pop())
                thr = threading.Thread(target=t.run)
                thr.start()
                self.threads.append(thr)
            elif inst.op == six.u("WAIT_EMPTY"):
                prefix = inst.pop()
                Tester.wait_empty(self.db, prefix)
                inst.push(b"WAITED_FOR_EMPTY")
            elif inst.op == six.u("UNIT_TESTS"):
                assert fdb.is_api_version_selected()
                api_version = fdb.get_api_version()
                try:
                    fdb.api_version(api_version + 1)
                    raise RuntimeError('Was not stopped from selecting two API versions')
                except RuntimeError as e:
                    assert str(e) == 'FDB API already loaded at version {}'.format(api_version)
                try:
                    fdb.api_version(api_version - 1)
                    raise RuntimeError('Was not stopped from selecting two API versions')
                except RuntimeError as e:
                    assert str(e) == 'FDB API already loaded at version {}'.format(api_version)
                fdb.api_version(api_version)

                try:
                    db.options.set_location_cache_size(100001)

                    test_options(db)
                    test_watches(db)
                    test_cancellation(db)
                    test_retry_limits(db)
                    test_timeouts(db)
                    test_combinations(db)
                    test_locality(db)
                    test_predicates()
                except fdb.FDBError as e:
                    print("Unit tests failed: %s" % e.description)
                    traceback.print_exc()
                    raise Exception("Unit tests failed: %s" % e.description)
            elif inst.op.startswith(six.u('DIRECTORY_')):
                self.directory_extension.process_instruction(inst)
            else:
                raise Exception("Unknown op %s" % inst.op)
        except fdb.FDBError as e:
            # print('ERROR: %s' % repr(e))
            inst.stack.push(idx, fdb.tuple.pack((b"ERROR", str(e.code).encode('ascii'))))

        # print("  to %s" % self.stack)
        # print()

    [thr.join() for thr in self.threads]
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import fdb
import sys

if __name__ == '__main__':
    fdb.api_version(700)


@fdb.transactional
def setValue(tr, key, value):
    tr[key] = value


@fdb.transactional
def setValueWithLimit(tr, key, value, limit):
    tr.options.set_size_limit(limit)
    tr[key] = value


def test_size_limit_option(db):
    value = b'a' * 1024
    setValue(db, b't1', value)
    assert value == db[b't1']
#!/usr/bin/env python3
'''
The FoundationDB example here:
https://apple.github.io/foundationdb/data-modeling.html#data-modeling-tuples

Converted into using `gateaux` structures and some test code added.
'''
import fdb
import gateaux

fdb.api_version(510)


class TemperatureReading(gateaux.Structure):
    key = (
        gateaux.IntegerField(name='year'),
        gateaux.IntegerField(name='day'),
    )
    value = (
        gateaux.IntegerField(name='degrees'),
    )


db = fdb.open()
temp_reading_space = fdb.Subspace(('temp_readings',))
temp_reading = TemperatureReading(temp_reading_space)


@fdb.transactional
def clear_space(tr):
    # Clean up
    del tr[temp_reading_space.range()]
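# A hedged usage sketch (assuming gateaux's pack_key/pack_value/unpack_value
# Structure interface; set_reading and get_reading are names added here, not
# part of the original example):
@fdb.transactional
def set_reading(tr, year, day, degrees):
    tr[temp_reading.pack_key((year, day))] = temp_reading.pack_value((degrees,))


@fdb.transactional
def get_reading(tr, year, day):
    raw = tr[temp_reading.pack_key((year, day))]
    if not raw.present():
        return None
    return temp_reading.unpack_value(raw.wait())[0]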
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import fdb
import fdb.tuple
import struct

_unpackedPrefix = 'unpacked'
_packedPrefix = 'packed'

fdb.api_version(16)


class _MergedData:
    def __init__(self):
        self.results = []
        self.finishedPack = False
        self.finishedUnpack = False
        self.packedIndex = 0


class Column:
    def __init__(self, columnName):
        self.columnName = columnName
        self.packFetchCount = 10
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import fdb
import sys

if __name__ == '__main__':
    fdb.api_version(720)


@fdb.transactional
def setValue(tr, key, value):
    tr[key] = value


@fdb.transactional
def setValueWithLimit(tr, key, value, limit):
    tr.options.set_size_limit(limit)
    tr[key] = value


def test_size_limit_option(db):
    value = b'a' * 1024
import itertools
import random

import fdb
import fdb.tuple
from pyDatalog import pyDatalog

from datalog import Datalog, kvp

fdb.api_version(300)
db = fdb.open()

# app is a subspace for an open directory
app = fdb.directory.create_or_open(db, ('app',))
Datalog(app, 10)

############################
##   Relational Algebra   ##
############################

pyDatalog.create_atoms('q,r,s,X,Y,Z')

# Select
r(X, 'foo')

# Project
q(X) <= r(X, Y)

# Join
q(X, Y, Z) <= r(X, Y) & s(X, Z)
import struct

import fdb

fdb.api_version(600)
db = fdb.open()

db[b'hello'] = b'world'
print(db[b'hello'])

test = fdb.directory.create_or_open(db, ('test',))
dictionary = test['dict']

words = [x.rstrip() for x in open('/usr/share/dict/words').readlines()]
print(f'Loaded {len(words)}')


@fdb.transactional
def add_batch(tr, words):
    for word in words:
        # print('Adding word', word)
        tr[dictionary.pack(('words', word,))] = b''
        tr.add(dictionary.pack(('__meta__', 'count')), struct.pack('<q', 1))


def add_words(db, words, batch_size=2048):
    total = 0
    for i in range(0, len(words), batch_size):
        tmp = words[i:i + batch_size]
        print(f'Batch {i}')
        add_batch(db, tmp)
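# A small companion sketch (not in the original): reading back the
# little-endian 64-bit counter maintained by the atomic tr.add above.
# get_word_count is a name added here for illustration.
@fdb.transactional
def get_word_count(tr):
    raw = tr[dictionary.pack(('__meta__', 'count'))]
    if not raw.present():
        return 0
    # Decode with the same '<q' layout the atomic add operates on.
    return struct.unpack('<q', raw.wait())[0]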
"""FoundationDB Vector Layer. Provides the Vector() class for storing and manipulating arrays in FoundationDB. """ import fdb import fdb.tuple import threading fdb.api_version(200) ######################## # _ImplicitTransaction # ######################## # A local class which is used to allow vector operations to be performed without # explicitly passing a transaction. It is created by vector.use_transaction # and is used as follows: # # with vector.use_transaction(tr): # vector[0] = 1 # vector.push(1) # ... class _ImplicitTransaction: def __init__(self, vector, tr): self.vector = vector self.tr = tr self.initialValue = self.vector.local.tr
def connect(self):
    fdb.api_version(100)
    self.__db = fdb.open(self._cluster, self._db, self._event_model)
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os
import sys

sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..', '..', 'bindings', 'python')]

import fdb

from pubsub_bigdoc import PubSub

fdb.api_version(14)
db = fdb.open('/home/bbc/fdb.cluster', 'DB')

del db[:]

ps = PubSub(db)

feed_a = ps.create_feed('alice')
ps.print_feed_stats(feed_a)
feed_b = ps.create_feed('bob')
feed_x = ps.create_feed('bieber')

inbox_a = ps.create_inbox('alice')
inbox_b = ps.create_inbox('bob')
inbox_x = ps.create_inbox('bieber')
from django.http import HttpResponseBadRequest
from django.http import HttpResponseForbidden
from django.http import HttpResponseNotFound
from django.template.defaulttags import register
from django.utils.html import format_html
from django.http import JsonResponse

import fdb

import vnstore
import nstore

from .models import ChangeRequest
from .models import Comment
from .helpers import guess

fdb.api_version(620)
db = fdb.open()

ITEMS = ['uid', 'key', 'value']

var = nstore.var

# vnstore contains the versioned ITEMS
vnstore = vnstore.open(['copernic', 'vnstore'], ITEMS)


@register.filter
def getattr(dictionary, key):
    return dictionary.get(key)


@register.filter
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import os

from flask import Flask

import fdb

app = Flask(__name__)

fdb.api_version(int(os.getenv('FDB_API_VERSION')))
db = fdb.open()

COUNTER_KEY = fdb.tuple.pack(('counter',))


def _increment_counter(tr):
    counter_value = tr[COUNTER_KEY]
    if counter_value == None:
        counter = 1
    else:
        counter = fdb.tuple.unpack(counter_value)[0] + 1
    tr[COUNTER_KEY] = fdb.tuple.pack((counter,))
    return counter
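# A hedged sketch (not in the original fragment) of wiring the helper into a
# route: @fdb.transactional supplies the retry loop, and the handler passes
# the Database so a fresh transaction is created per request. The route name
# and wrapper are assumptions added here for illustration.
@fdb.transactional
def increment_counter(tr):
    return _increment_counter(tr)


@app.route('/counter', methods=['POST'])
def bump_counter():
    return str(increment_counter(db))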
import ctypes
import math
import sys
import os
import struct
import threading
import time
import random
import traceback

sys.path[:0] = [os.path.join(os.path.dirname(__file__), '..')]

import fdb

fdb.api_version(int(sys.argv[2]))

from fdb import six
from fdb.impl import strinc
import fdb.tuple

from directory_extension import DirectoryExtension

from cancellation_timeout_tests import test_timeouts
from cancellation_timeout_tests import test_db_timeouts
from cancellation_timeout_tests import test_cancellation
from cancellation_timeout_tests import test_retry_limits
from cancellation_timeout_tests import test_db_retry_limits
from cancellation_timeout_tests import test_combinations
from size_limit_tests import test_size_limit_option, test_get_approximate_size
#! /usr/bin/python
'''
This file provides a sample app for loading data into FDB.

To use it to load data into one of the sample clusters in this repo, you can
build the image by running
`docker build -t fdb-data-loader sample-apps/data-loader`, and then run the
data loader by running `kubectl apply -f sample-apps/data-loader/job.yaml`
'''
import argparse
import random
import uuid

import fdb

fdb.api_version(600)


@fdb.transactional
def write_batch(tr, batch_size, value_size):
    prefix = uuid.uuid4()
    for index in range(1, batch_size + 1):
        key = fdb.tuple.pack((prefix, index))
        value = []
        for _ in range(0, value_size):
            value.append(random.randint(0, 255))
        tr[key] = bytes(value)


def load_data(keys, batch_size, value_size):
    batch_count = int(keys / batch_size)
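    # The original is cut off above; a plausible continuation (an assumption,
    # added here for illustration): open the database once and commit one
    # transaction per batch.
    db = fdb.open()
    for batch in range(batch_count):
        write_batch(db, batch_size, value_size)
        print('Wrote batch %d of %d' % (batch + 1, batch_count))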
# See the License for the specific language governing permissions and
# limitations under the License.
#

import argparse
import os
import sys
import time
import traceback

sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from python_tests import PythonTest

import fdb

fdb.api_version(400)


class RYWBenchmark(PythonTest):
    tests = {
        'get_single': "RYW: get single cached value throughput",
        'get_many_sequential': "RYW: get sequential cached values throughput",
        'get_range_basic': "RYW: get range cached values throughput",
        'single_clear_get_range': "RYW: get range cached values with clears throughput",
        'clear_range_get_range': "RYW: get range cached values with clear ranges throughput",
        'interleaved_sets_gets':
SimpleDoc also provides a powerful plugin capability that allows multiple
levels of plugins to manipulate the logical-to-physical mapping of
operations. The use of plugins is illustrated with an Index plugin that
permits an application to create indexes on documents using a
pattern-matching syntax.
"""

import json
import threading
import weakref
from bisect import bisect_left

import fdb
import fdb.tuple

fdb.api_version(100)

doc_cache = weakref.WeakValueDictionary()


#######
# Doc #
#######

class Doc(object):
    """
    Doc is the basic unit of data, representing a document (or nested
    dictionary). It provides functions to get, set, and clear documents. In
    addition, it provides JSON export capability as both a simple operation
    and a streaming operation appropriate for large documents.
    """
#!/usr/bin/env python

import fdb
import fdb.tuple

fdb.api_version(21)
db = fdb.open()


@fdb.transactional
def dump_tuples(tr, start, end):
    # Scan only the requested range; keys and values are tuple-encoded.
    for k, v in tr[start:end]:
        print(fdb.tuple.unpack(k), fdb.tuple.unpack(v))


dump_tuples(db, b'', b'\xff')
# limitations under the License.
#

import random

import fdb

from bindingtester import FDB_API_VERSION
from bindingtester import util
from bindingtester.tests import Test, Instruction, InstructionSet, ResultSpecification
from bindingtester.tests import test_util, directory_util
from bindingtester.tests.directory_state_tree import DirectoryStateTreeNode

fdb.api_version(FDB_API_VERSION)


class DirectoryTest(Test):
    def __init__(self, subspace):
        super(DirectoryTest, self).__init__(subspace)
        self.stack_subspace = subspace['stack']
        self.directory_log = subspace['directory_log']['directory']
        self.subspace_log = subspace['directory_log']['subspace']
        self.prefix_log = subspace['prefix_log']

        self.prepopulated_dirs = []
        self.next_path = 1

    def ensure_default_directory_subspace(self, instructions, path):