class MyClient(object):
    """Minimal txcouchbase demo: connect, store a key, then read it back.

    Fix: the original used Python-2-only ``print`` statements; the other
    scripts in this file already use ``print()`` calls, so the handlers
    are made consistent (behavior-identical for a single argument).
    """

    def __init__(self):
        # The connection is asynchronous; operations issued before the
        # bucket is ready are queued by the client.
        self.cb = Connection(bucket='default')
        self.do_set()

    def on_op_error(self, msg):
        """Errback-style handler: report an operation failure."""
        print("Got operation error!" + str(msg))

    def do_set(self):
        """Store "foo" -> "bar" and chain to on_set."""
        self.cb.set("foo", "bar").addCallback(self.on_set)

    def on_set(self, res):
        """Set completed: print the result, then fetch the key back."""
        print(res)
        self.cb.get("foo").addCallback(self.on_get)

    def on_get(self, res):
        """Get completed: print the retrieved value."""
        print(res)
class CBAsyncGen:
    """Asynchronous CRUD facade over a Twisted Couchbase connection."""

    TIMEOUT = 60  # seconds

    def __init__(self, use_ssl=False, **kwargs):
        """Build the underlying client and apply the class timeout.

        ``use_ssl`` is accepted for interface compatibility.
        """
        client = TxConnection(quiet=True, **kwargs)
        client.timeout = self.TIMEOUT
        self.client = client

    def create(self, key: str, doc: dict):
        """Store ``doc`` under ``key`` (insert or overwrite)."""
        return self.client.set(key, doc)

    def read(self, key: str):
        """Fetch the document stored under ``key``."""
        return self.client.get(key)

    def update(self, key: str, doc: dict):
        """Replace the document stored under ``key``."""
        return self.client.set(key, doc)

    def delete(self, key: str):
        """Remove ``key`` from the bucket."""
        return self.client.delete(key)
class CBAsyncGen(object):
    """Asynchronous CRUD wrapper over a Twisted Couchbase connection."""

    def __init__(self, **kwargs):
        self.client = TxConnection(quiet=True, timeout=60, **kwargs)

    def create(self, key, doc, ttl=None):
        """Store ``doc`` under ``key`` with an optional expiry.

        Bug fix: the original forwarded ``ttl`` only when it was None
        (``if ttl is None``), so an explicitly requested expiry was
        silently dropped and ``ttl=None`` was passed instead.
        """
        extra_params = {}
        if ttl is not None:
            extra_params['ttl'] = ttl
        return self.client.set(key, doc, **extra_params)

    def read(self, key):
        """Fetch the document stored under ``key``."""
        return self.client.get(key)

    def update(self, key, doc):
        """Replace the document stored under ``key``."""
        return self.client.set(key, doc)

    def cas(self, key, doc):
        """Replace ``doc`` using the key's current CAS value.

        NOTE(review): ``self.client.get(key)`` on a Twisted connection
        normally returns a Deferred, which has no ``.cas`` attribute —
        confirm this path is exercised with a synchronous client.
        """
        cas = self.client.get(key).cas
        return self.client.set(key, doc, cas=cas)

    def delete(self, key):
        """Remove ``key`` from the bucket."""
        return self.client.delete(key)
class WorkloadGen:
    """Twisted-driven workload: bulk-load items, then run several passes
    that append new fields to the stored documents.

    NOTE(review): uses ``iterator.next()`` and relies on ``/`` integer
    division — this snippet targets Python 2.
    """

    # Number of full append iterations performed after the initial load.
    NUM_ITERATIONS = 5

    def __init__(self, num_items, host, bucket, password, small=True):
        # ``small`` selects the small-document iterator classes;
        # otherwise the large-document variants are used.
        self.num_items = num_items
        if small:
            self.kv_cls = KeyValueIterator
            self.field_cls = NewFieldIterator
        else:
            self.kv_cls = KeyLargeValueIterator
            self.field_cls = NewLargeFieldIterator
        self.kv_iterator = self.kv_cls(self.num_items)
        self.field_iterator = self.field_cls(self.num_items)
        self.cb = Connection(bucket=bucket, host=host, password=password)
        # Progress trackers for the append phase: ``fraction`` is the
        # divisor of the working set (1, 2, 4), ``iteration`` counts
        # completed 1 -> 1/2 -> 1/4 cycles.
        self.fraction = 1
        self.iteration = 0

    def _interrupt(self, err):
        # Any errback aborts the whole run through the logger.
        logger.interrupt(err.value)

    def _on_set(self, *args):
        # Count completed sets; when a whole batch has landed, issue the
        # next batch.
        self.counter += 1
        if self.counter == self.kv_cls.BATCH_SIZE:
            self._set()

    def _set(self, *args):
        # Issue one batch of set operations; when the iterator is
        # exhausted (StopIteration), switch to the append phase.
        self.counter = 0
        try:
            for k, v in self.kv_iterator.next():
                d = self.cb.set(k, v)
                d.addCallback(self._on_set)
                d.addErrback(self._interrupt)
        except StopIteration:
            logger.info('Started iteration: {}-{}'.format(self.iteration,
                                                          self.fraction))
            self._append()

    def run(self):
        """Connect, start the initial load, and block in the reactor."""
        logger.info('Running initial load: {} items'.format(self.num_items))
        d = self.cb.connect()
        d.addCallback(self._set)
        d.addErrback(self._interrupt)
        reactor.run()

    def _on_append(self, *args):
        # Batch counter for the append phase, mirroring _on_set.
        self.counter += 1
        if self.counter == self.field_cls.BATCH_SIZE:
            self._append()

    def _on_get(self, rv, f):
        # Append the new field to the fetched value and write it back.
        v = rv.value
        v.append(f)
        d = self.cb.set(rv.key, v)
        d.addCallback(self._on_append)
        d.addErrback(self._interrupt)

    def _append(self, *args):
        # Issue one batch of get operations (each get chains into a set
        # via _on_get). StopIteration marks the end of a pass.
        self.counter = 0
        try:
            for k, f in self.field_iterator.next():
                d = self.cb.get(k)
                d.addCallback(self._on_get, f)
                d.addErrback(self._interrupt)
        except StopIteration:
            logger.info('Finished iteration: {}-{}'.format(self.iteration,
                                                           self.fraction))
            if self.fraction == 4:
                # Finished the quarter-sized pass: reset to a full pass
                # and count one completed iteration.
                num_items = self.num_items
                self.fraction = 1
                self.iteration += 1
                if self.iteration == self.NUM_ITERATIONS:
                    reactor.stop()
            else:
                # Halve the working set each pass: full -> 1/2 -> 1/4.
                self.fraction *= 2
                num_items = self.num_items / self.fraction
            self.field_iterator = self.field_cls(num_items)
            logger.info('Started iteration: {}-{}'.format(self.iteration,
                                                          self.fraction))
            self._append()
def run_sync_example():
    """Set then get "foo" in sequential style using yielded Deferreds.

    Fix: the original used Python-2-only ``print`` statements; converted
    to ``print()`` calls for consistency with the other scripts here.

    NOTE(review): this generator must be driven by an
    ``inlineCallbacks``-style decorator/runner (not visible in this
    chunk) — confirm at the call site.
    """
    cb = Connection(bucket='default')
    rv_set = yield cb.set("foo", "bar")
    print(rv_set)
    rv_get = yield cb.get("foo")
    print(rv_get)
class AsyncGen(object):
    """Walks a person graph, appending each person's value to all of
    their friends' documents via multi-append, falling back to ``set``
    for keys that do not exist yet.
    """

    def __init__(self, iterator, conn):
        # Deterministic RNG so generated values are reproducible.
        self.rng = random.Random(0)
        self.client = TxCouchbase(**conn)
        d = self.client.connect()
        d.addCallback(self.on_connect_success)
        d.addErrback(self.on_connect_error)
        self.iterator = iterator
        self.persons = (_ for _ in iterator)

    def on_connect_error(self, err):
        logger.info('Got error: {}'.format(err))
        # Handle it, it's a normal Failure object
        self.client._close()
        err.trap()

    def on_connect_success(self, _):
        logger.info('Couchbase Connected!')
        self.process_next_person()

    def process_next_person(self):
        """Pick the next person from the graph; and add them to CB"""
        try:
            person = self.persons.next()
        except StopIteration:
            logger.info('StopIteration')
            reactor.stop()
            return
        value = self.iterator.person_to_value(self.rng, person)
        # Build list of append ops for multi_append
        ops = {}
        for friend in self.iterator.graph[person]:
            key = self.iterator.person_to_key(friend)
            ops[key] = ';' + value
        # Do the actual work.
        d = self.client.append_multi(ops, format=FMT_UTF8)
        d.addCallback(self._on_append)
        d.addErrback(self._on_multi_fail, ops)

    def _on_append(self, result):
        """Success, schedule next"""
        global count
        count += 1
        self.process_next_person()

    def _on_multi_fail(self, err, ops):
        """Multi failed, crack and handle failures with set."""
        # Re-raises anything that is not one of these two types.
        err.trap(exceptions.NotStoredError, exceptions.TimeoutError)
        if err.check(exceptions.NotStoredError):
            # One or more keys do not yet exist, handle with set
            for k, v in err.value.all_results.items():
                logger.info('VAL: {}'.format(err.value))
                if not v.success:
                    if v.rc == LCB_NOT_STORED:
                        # Snip off semicolon for initial value.
                        logger.info('SET: {} {}'.format(k, ops[k][1:]))
                        d = self.client.set(k, ops[k][1:], format=FMT_UTF8)
                        d.addCallback(self._on_set)
                        d.addErrback(self._on_set_fail)
        # Bug fix: the original compared the Failure object itself to the
        # exception class (`err == exceptions.TimeoutError`), which is
        # never true; use Failure.check() as the branch above does.
        elif err.check(exceptions.TimeoutError):
            logger.interrupt('Timeout: {}'.format(err))
        else:
            logger.interrupt('Unhandled error: {}'.format(err))

    def _on_set(self, result):
        pass

    def _on_set_fail(self, err):
        # Bug fix: the original called 'ON_SET_FAIL'.format(err) — a
        # format string with no placeholder, silently dropping the error.
        logger.interrupt('ON_SET_FAIL: {}'.format(err))
class WorkloadGen:
    """Initial-load-then-append workload generator built on Twisted
    Deferreds: sets all items in batches, then repeatedly fetches
    documents and appends a new field to each.

    NOTE(review): ``.next()`` and ``/`` division semantics indicate this
    snippet is written for Python 2.
    """

    # How many complete append cycles to run before stopping the reactor.
    NUM_ITERATIONS = 5

    def __init__(self, num_items, host, bucket, password, small=True):
        # Choose small- or large-document iterator classes up front.
        self.num_items = num_items
        if small:
            self.kv_cls = KeyValueIterator
            self.field_cls = NewFieldIterator
        else:
            self.kv_cls = KeyLargeValueIterator
            self.field_cls = NewLargeFieldIterator
        self.kv_iterator = self.kv_cls(self.num_items)
        self.field_iterator = self.field_cls(self.num_items)
        self.cb = Connection(bucket=bucket, host=host, password=password)
        # ``fraction`` divides the working set per pass (1, 2, 4);
        # ``iteration`` counts completed cycles of those three passes.
        self.fraction = 1
        self.iteration = 0

    def _interrupt(self, err):
        # Shared errback: abort the run on any operation failure.
        logger.interrupt(err.value)

    def _on_set(self, *args):
        # One set finished; after a full batch, schedule the next batch.
        self.counter += 1
        if self.counter == self.kv_cls.BATCH_SIZE:
            self._set()

    def _set(self, *args):
        # Fire one batch of set operations; StopIteration means the
        # initial load is done and the append phase begins.
        self.counter = 0
        try:
            for k, v in self.kv_iterator.next():
                d = self.cb.set(k, v)
                d.addCallback(self._on_set)
                d.addErrback(self._interrupt)
        except StopIteration:
            logger.info('Started iteration: {}-{}'.format(
                self.iteration, self.fraction))
            self._append()

    def run(self):
        """Connect to the bucket, kick off the load, run the reactor."""
        logger.info('Running initial load: {} items'.format(self.num_items))
        d = self.cb.connect()
        d.addCallback(self._set)
        d.addErrback(self._interrupt)
        reactor.run()

    def _on_append(self, *args):
        # Batch counter for the append phase (mirror of _on_set).
        self.counter += 1
        if self.counter == self.field_cls.BATCH_SIZE:
            self._append()

    def _on_get(self, rv, f):
        # Read-modify-write: append the new field and store it back.
        v = rv.value
        v.append(f)
        d = self.cb.set(rv.key, v)
        d.addCallback(self._on_append)
        d.addErrback(self._interrupt)

    def _append(self, *args):
        # Fire one batch of gets; each get chains into a set via
        # _on_get. StopIteration ends the current pass.
        self.counter = 0
        try:
            for k, f in self.field_iterator.next():
                d = self.cb.get(k)
                d.addCallback(self._on_get, f)
                d.addErrback(self._interrupt)
        except StopIteration:
            logger.info('Finished iteration: {}-{}'.format(
                self.iteration, self.fraction))
            if self.fraction == 4:
                # Quarter-sized pass done: reset to a full-sized pass and
                # count the finished iteration.
                num_items = self.num_items
                self.fraction = 1
                self.iteration += 1
                if self.iteration == self.NUM_ITERATIONS:
                    reactor.stop()
            else:
                # Shrink the working set: full -> half -> quarter.
                self.fraction *= 2
                num_items = self.num_items / self.fraction
            self.field_iterator = self.field_cls(num_items)
            logger.info('Started iteration: {}-{}'.format(
                self.iteration, self.fraction))
            self._append()
class WorkloadGen:
    """Collection-aware variant of the load-then-append workload.

    Supports both couchbase SDK 2.x (bucket-level ``Connection``) and
    SDK 3.x (``TxCluster`` + scope/collection), selected at runtime from
    the installed package version.

    NOTE(review): ``.next()`` is Python-2-only, while the SDK-3 path
    implies Python 3 — confirm which interpreter actually runs this.
    """

    # Number of complete append cycles to run before stopping.
    NUM_ITERATIONS = 5

    def __init__(self, num_items, host, bucket, password, collections=None,
                 small=True):
        # Any non-empty ``collections`` value switches to the SDK-3
        # collection code paths.
        if collections:
            self.use_collection = True
        else:
            self.use_collection = False
        self.num_items = num_items
        # Small vs. large document iterator classes.
        if small:
            self.kv_cls = KeyValueIterator
            self.field_cls = NewFieldIterator
        else:
            self.kv_cls = KeyLargeValueIterator
            self.field_cls = NewLargeFieldIterator
        self.kv_iterator = self.kv_cls(self.num_items)
        self.field_iterator = self.field_cls(self.num_items)
        # Pick the client API from the installed SDK's major version.
        cb_version = pkg_resources.get_distribution("couchbase").version
        if cb_version[0] == '2':
            self.cb = Connection(bucket=bucket, host=host, password=password)
        elif cb_version[0] == '3':
            connection_string = 'couchbase://{host}?password={password}'
            connection_string = connection_string.format(host=host,
                                                         password=password)
            pass_auth = PasswordAuthenticator(bucket, password)
            self.cluster = TxCluster(connection_string=connection_string,
                                     options=ClusterOptions(pass_auth))
            self.bucket = self.cluster.bucket(bucket)
            # Hard-coded scope/collection names used by this workload.
            self.collection = self.bucket.scope("scope-1").collection(
                "collection-1")
        # Progress trackers for the append phase.
        self.fraction = 1
        self.iteration = 0

    def _interrupt(self, err):
        # Shared errback: abort the run on any operation failure.
        logger.interrupt(err.value)

    def _on_set(self, *args):
        # One set/upsert finished; after a full batch, issue the next.
        self.counter += 1
        if self.counter == self.kv_cls.BATCH_SIZE:
            self._set()

    def _set(self, *args):
        # Fire one batch of writes on whichever client API is active;
        # StopIteration means the initial load is done.
        self.counter = 0
        try:
            for k, v in self.kv_iterator.next():
                if self.use_collection:
                    d = self.collection.upsert(k, v)
                    d.addCallback(self._on_set)
                    d.addErrback(self._interrupt)
                else:
                    d = self.cb.set(k, v)
                    d.addCallback(self._on_set)
                    d.addErrback(self._interrupt)
        except StopIteration:
            logger.info('Started iteration: {}-{}'.format(
                self.iteration, self.fraction))
            self._append()

    def run(self):
        """Connect via the active API, start the load, run the reactor."""
        if self.use_collection:
            logger.info('Running initial load: {} items per collection'.format(
                self.num_items))
            d = self.bucket.on_connect()
            d.addCallback(self._set)
            d.addErrback(self._interrupt)
        else:
            logger.info('Running initial load: {} items'.format(
                self.num_items))
            d = self.cb.connect()
            d.addCallback(self._set)
            d.addErrback(self._interrupt)
        reactor.run()

    def _on_append(self, *args):
        # Batch counter for the append phase (mirror of _on_set).
        self.counter += 1
        if self.counter == self.field_cls.BATCH_SIZE:
            self._append()

    def _on_get(self, rv, f, key=None):
        # Read-modify-write: append the new field and store the value
        # back. SDK-3 results expose ``content`` and do not carry the
        # key, so it is threaded through as ``key``; SDK-2 results carry
        # both ``value`` and ``key``.
        if self.use_collection:
            v = rv.content
            v.append(f)
            d = self.collection.upsert(key, v)
            d.addCallback(self._on_append)
            d.addErrback(self._interrupt)
        else:
            v = rv.value
            v.append(f)
            d = self.cb.set(rv.key, v)
            d.addCallback(self._on_append)
            d.addErrback(self._interrupt)

    def _append(self, *args):
        # Fire one batch of gets; each chains into a write via _on_get.
        self.counter = 0
        try:
            for k, f in self.field_iterator.next():
                if self.use_collection:
                    d = self.collection.get(k)
                    d.addCallback(self._on_get, f, k)
                    d.addErrback(self._interrupt)
                else:
                    d = self.cb.get(k)
                    d.addCallback(self._on_get, f)
                    d.addErrback(self._interrupt)
        except StopIteration:
            logger.info('Finished iteration: {}-{}'.format(
                self.iteration, self.fraction))
            if self.fraction == 4:
                # Quarter-sized pass done: reset to a full pass and count
                # the finished iteration.
                num_items = self.num_items
                self.fraction = 1
                self.iteration += 1
                if self.iteration == self.NUM_ITERATIONS:
                    reactor.stop()
            else:
                # Shrink the working set: full -> half -> quarter.
                self.fraction *= 2
                # NOTE(review): under Python 3 this is true division and
                # yields a float — confirm field_cls accepts that.
                num_items = self.num_items / self.fraction
            self.field_iterator = self.field_cls(num_items)
            logger.info('Started iteration: {}-{}'.format(
                self.iteration, self.fraction))
            self._append()
from twisted.internet import reactor
from couchbase import experimental

# The experimental API must be enabled before importing txcouchbase.
experimental.enable()

from txcouchbase.connection import Connection as TxCouchbase

client = TxCouchbase(bucket='default')


def handle_set(result):
    """Print the outcome of the set operation."""
    print("Set key. Result", result)


def handle_get(result):
    """Print the fetched value, then shut the reactor down."""
    print("Got key. Result", result)
    reactor.stop()


# Operations queue on the connection; the get is dispatched after the set.
set_deferred = client.set("key", "value")
set_deferred.addCallback(handle_set)
get_deferred = client.get("key")
get_deferred.addCallback(handle_get)
reactor.run()
from twisted.internet import reactor
from txcouchbase.connection import Connection as TxCouchbase

client = TxCouchbase(bucket='default')


def handle_set(result):
    """Print the outcome of the set operation."""
    print("Set key. Result", result)


def handle_get(result):
    """Print the fetched value, then stop the event loop."""
    print("Got key. Result", result)
    reactor.stop()


# Operations queue on the connection; the get is dispatched after the set.
set_deferred = client.set("key", "value")
set_deferred.addCallback(handle_set)
get_deferred = client.get("key")
get_deferred.addCallback(handle_get)
reactor.run()