Example #1
 def check_dataloss(self, server, bucket):
     from couchbase.bucket import Bucket
     from couchbase.exceptions import NotFoundError
     from lib.memcached.helper.data_helper import VBucketAwareMemcached
     bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name))
     rest = RestConnection(self.master)
     VBucketAware = VBucketAwareMemcached(rest, bucket.name)
     _, _, _ = VBucketAware.request_map(rest, bucket.name)
     batch_start = 0
     batch_end = 0
     batch_size = 10000
     errors = []
     while self.num_items > batch_end:
         batch_end = batch_start + batch_size
         keys = []
         for i in xrange(batch_start, batch_end, 1):
             keys.append(str(i).rjust(20, '0'))
         try:
             bkt.get_multi(keys)
             self.log.info("Able to fetch keys starting from {0} to {1}".format(keys[0], keys[len(keys)-1]))
         except Exception as e:
             self.log.error(e)
             self.log.info("Now trying keys in the batch one at a time...")
             # Probe each key individually so one missing key does not
             # abort the check for the rest of the batch.
             for key in keys:
                 try:
                     bkt.get(key)
                 except NotFoundError:
                     vBucketId = VBucketAware._get_vBucket_id(key)
                     errors.append("Missing key: {0}, VBucketId: {1}".
                                   format(key, vBucketId))
         batch_start += batch_size
     return errors
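
A note on the recovery loop above: in the 2.x Python SDK, a failed get_multi raises a multi-operation CouchbaseError, and (as the later examples show) its split_results() separates the per-key successes from the failures, which avoids re-fetching the whole batch one key at a time. A minimal sketch, assuming a reachable cluster at 127.0.0.1 with a bucket named default:

from couchbase.bucket import Bucket
from couchbase.exceptions import CouchbaseError

bkt = Bucket('couchbase://127.0.0.1/default')  # assumed connection string
try:
    results = bkt.get_multi(['key-1', 'key-2'])  # hypothetical keys
except CouchbaseError as e:
    ok, fail = e.split_results()  # per-key result dicts: succeeded / failed
    for key in fail:
        print("could not fetch: {0}".format(key))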
Example #2
 def check_dataloss(self, server, bucket):
     from couchbase.bucket import Bucket
     from couchbase.exceptions import NotFoundError
     from lib.memcached.helper.data_helper import VBucketAwareMemcached
     bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name), username="******", password="******")
     rest = RestConnection(self.master)
     VBucketAware = VBucketAwareMemcached(rest, bucket.name)
     _, _, _ = VBucketAware.request_map(rest, bucket.name)
     batch_start = 0
     batch_end = 0
     batch_size = 10000
     errors = []
     while self.num_items > batch_end:
         batch_end = batch_start + batch_size
         keys = []
         for i in xrange(batch_start, batch_end, 1):
             keys.append(str(i).rjust(20, '0'))
         try:
             bkt.get_multi(keys)
             self.log.info("Able to fetch keys starting from {0} to {1}".format(keys[0], keys[len(keys)-1]))
         except Exception as e:
             self.log.error(e)
             self.log.info("Now trying keys in the batch one at a time...")
             # Probe each key individually so one missing key does not
             # abort the check for the rest of the batch.
             for key in keys:
                 try:
                     bkt.get(key)
                 except NotFoundError:
                     vBucketId = VBucketAware._get_vBucket_id(key)
                     errors.append("Missing key: {0}, VBucketId: {1}".
                                   format(key, vBucketId))
         batch_start += batch_size
     return errors
Example #3
 def check_dataloss(self, server, bucket, num_items):
     from couchbase.bucket import Bucket
     from couchbase.exceptions import NotFoundError, CouchbaseError
     from lib.memcached.helper.data_helper import VBucketAwareMemcached
     self.log.info(
         "########## validating data for bucket : {} ###########".format(
             bucket))
     cb_version = RestConnection(server).get_nodes_version()[:3]
     if cb_version < "5":
         bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name),
                      timeout=5000)
     else:
         bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name),
                      username=server.rest_username,
                      password=server.rest_password,
                      timeout=5000)
     rest = RestConnection(self.master)
     VBucketAware = VBucketAwareMemcached(rest, bucket.name)
     _, _, _ = VBucketAware.request_map(rest, bucket.name)
     batch_start = 0
     batch_end = 0
     batch_size = 10000
     errors = []
     while num_items > batch_end:
         batch_end = batch_start + batch_size
         keys = []
         for i in xrange(batch_start, batch_end, 1):
             keys.append(str(i).rjust(20, '0'))
         try:
             bkt.get_multi(keys)
             self.log.info(
                 "Able to fetch keys starting from {0} to {1}".format(
                     keys[0], keys[len(keys) - 1]))
         except CouchbaseError as e:
             self.log.error(e)
             ok, fail = e.split_results()
             if fail:
                 for key in fail:
                     try:
                         bkt.get(key)
                     except NotFoundError:
                         vBucketId = VBucketAware._get_vBucket_id(key)
                         errors.append(
                             "Missing key: {0}, VBucketId: {1}".format(
                                 key, vBucketId))
         batch_start += batch_size
     self.log.info("Total missing keys:{}".format(len(errors)))
     self.log.info(errors)
     return errors
Example #4
def check_dataloss(ip, bucket, num_items):
    from couchbase.bucket import Bucket
    from couchbase.exceptions import (NotFoundError, CouchbaseError,
                                      CouchbaseNetworkError,
                                      CouchbaseTransientError)
    bkt = Bucket('couchbase://{0}/{1}'.format(ip, bucket),
                 username="******",
                 password="******")
    batch_start = 2000000
    batch_end = 0
    batch_size = 10000
    errors = []
    missing_keys = []
    errors_replica = []
    missing_keys_replica = []
    while num_items > batch_end:
        batch_end = batch_start + batch_size
        keys = []
        for i in xrange(batch_start, batch_end, 1):
            keys.append(str(i))
        try:
            bkt.get_multi(keys)
            print("Able to fetch keys starting from {0} to {1}".format(
                keys[0], keys[len(keys) - 1]))
        except CouchbaseError as e:
            print(e)
            ok, fail = e.split_results()
            if fail:
                for key in fail:
                    try:
                        bkt.get(key)
                    except NotFoundError:
                        errors.append("Missing key: {0}".format(key))
                        missing_keys.append(key)
        try:
            bkt.get_multi(keys, replica=True)
            print("Able to fetch keys starting from {0} to {1} in replica ".
                  format(keys[0], keys[len(keys) - 1]))
        except (CouchbaseError, CouchbaseNetworkError,
                CouchbaseTransientError) as e:
            print(e)
            ok, fail = e.split_results()
            if fail:
                for key in fail:
                    try:
                        bkt.get(key)
                    except NotFoundError:
                        errors_replica.append("Missing key: {0}".format(key))
                        missing_keys_replica.append(key)
        batch_start += batch_size
    return errors, missing_keys, errors_replica, missing_keys_replica
Example #5
def get_documents_by_keys(
    bucket: Bucket, *, keys: List[str], doc_model: Type[BaseModel]
):
    # quiet=True: missing keys come back as unsuccessful results instead
    # of raising NotFoundError, so only parse the successful ones.
    results = bucket.get_multi(keys, quiet=True)
    docs = []
    for result in results.values():
        if result.success:
            docs.append(doc_model(**result.value))
    return docs
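
A hedged usage sketch for the helper above. BaseModel is assumed to come from pydantic, and the User model, bucket name, and keys are hypothetical:

from typing import List, Type

from couchbase.bucket import Bucket
from pydantic import BaseModel  # assumption: BaseModel is pydantic's

class User(BaseModel):  # hypothetical document model
    name: str
    email: str

bucket = Bucket('couchbase://127.0.0.1/users')  # assumed bucket
docs = get_documents_by_keys(bucket, keys=['user::1', 'user::2'], doc_model=User)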
Example #6
 def check_dataloss(self, server, bucket, num_items):
     from couchbase.bucket import Bucket
     from couchbase.exceptions import NotFoundError, CouchbaseError
     from lib.memcached.helper.data_helper import VBucketAwareMemcached
     self.log.info("########## validating data for bucket : {} ###########".format(bucket))
     cb_version = RestConnection(server).get_nodes_version()[:3]
     if cb_version < "5":
         bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name), timeout=5000)
     else:
         bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name), username=server.rest_username,
                      password=server.rest_password, timeout=5000)
     rest = RestConnection(self.master)
     VBucketAware = VBucketAwareMemcached(rest, bucket.name)
     _, _, _ = VBucketAware.request_map(rest, bucket.name)
     batch_start = 0
     batch_end = 0
     batch_size = 10000
     errors = []
     while num_items > batch_end:
         batch_end = batch_start + batch_size
         keys = []
         for i in xrange(batch_start, batch_end, 1):
             keys.append(str(i).rjust(20, '0'))
         try:
             bkt.get_multi(keys)
             self.log.info("Able to fetch keys starting from {0} to {1}".format(keys[0], keys[len(keys) - 1]))
         except CouchbaseError as e:
             self.log.error(e)
             ok, fail = e.split_results()
             if fail:
                 for key in fail:
                     try:
                         bkt.get(key)
                     except NotFoundError:
                         vBucketId = VBucketAware._get_vBucket_id(key)
                         errors.append("Missing key: {0}, VBucketId: {1}".
                                       format(key, vBucketId))
         batch_start += batch_size
     self.log.info("Total missing keys:{}".format(len(errors)))
     self.log.info(errors)
     return errors
Example #7
 def getByView(self, parameter):
     bucket = Bucket(self._bucketUrl)
     options = Query()
     options.mapkey_range = (str(parameter), str(parameter))
     options.stale = False
     rows = bucket.query(self.designDocument, self._viewName, query=options)
     # the resulting row view from bucket.query is [key, value, docid, doc]
     # since we want docids, select the elements with index 2
     docids = [row[2] for row in rows]
     if len(docids) == 0:
         return []
     results = bucket.get_multi(docids).values()
     return [result.value for result in results]
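
For context, mapkey_range with identical endpoints is an exact match on the view's emitted key, and stale=False forces the index to catch up before results are returned. A standalone sketch of the same pattern, assuming a 2.x connection and a hypothetical design document 'docs' with a view 'by_type':

from couchbase.bucket import Bucket
from couchbase.views.params import Query

bucket = Bucket('couchbase://127.0.0.1/default')  # assumed cluster
options = Query()
options.mapkey_range = ('hotel', 'hotel')  # exact match on the emitted key
options.stale = False                      # index updates before the query runs
rows = bucket.query('docs', 'by_type', query=options)
docids = [row.docid for row in rows]       # ViewRow fields: key, value, docid, doc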
Example #8
class TestStandardCouchDB(unittest.TestCase):
    def setup_class(self):
        """ Clear all spans before a test run """
        self.recorder = tracer.recorder
        self.cluster = Cluster('couchbase://%s' % testenv['couchdb_host'])
        self.bucket = Bucket('couchbase://%s/travel-sample' %
                             testenv['couchdb_host'],
                             username=testenv['couchdb_username'],
                             password=testenv['couchdb_password'])

    def setup_method(self):
        self.bucket.upsert('test-key', 1)
        time.sleep(0.5)
        self.recorder.clear_spans()

    def test_vanilla_get(self):
        res = self.bucket.get("test-key")
        assert (res)

    def test_pipeline(self):
        pass

    def test_upsert(self):
        res = None
        with tracer.start_active_span('test'):
            res = self.bucket.upsert("test_upsert", 1)

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'upsert')

    def test_upsert_multi(self):
        res = None

        kvs = dict()
        kvs['first_test_upsert_multi'] = 1
        kvs['second_test_upsert_multi'] = 1

        with tracer.start_active_span('test'):
            res = self.bucket.upsert_multi(kvs)

        assert (res)
        self.assertTrue(res['first_test_upsert_multi'].success)
        self.assertTrue(res['second_test_upsert_multi'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'upsert_multi')

    def test_insert_new(self):
        res = None
        try:
            self.bucket.remove('test_insert_new')
        except NotFoundError:
            pass

        with tracer.start_active_span('test'):
            res = self.bucket.insert("test_insert_new", 1)

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'insert')

    def test_insert_existing(self):
        res = None
        try:
            self.bucket.insert("test_insert", 1)
        except KeyExistsError:
            pass

        try:
            with tracer.start_active_span('test'):
                res = self.bucket.insert("test_insert", 1)
        except KeyExistsError:
            pass

        self.assertIsNone(res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertEqual(cb_span.ec, 1)
        # Just search for the substring of the exception class
        found = cb_span.data["couchbase"]["error"].find("_KeyExistsError")
        self.assertFalse(found == -1, "Error substring not found.")

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'insert')

    def test_insert_multi(self):
        res = None

        kvs = dict()
        kvs['first_test_upsert_multi'] = 1
        kvs['second_test_upsert_multi'] = 1

        try:
            self.bucket.remove('first_test_upsert_multi')
            self.bucket.remove('second_test_upsert_multi')
        except NotFoundError:
            pass

        with tracer.start_active_span('test'):
            res = self.bucket.insert_multi(kvs)

        assert (res)
        self.assertTrue(res['first_test_upsert_multi'].success)
        self.assertTrue(res['second_test_upsert_multi'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'insert_multi')

    def test_replace(self):
        res = None
        try:
            self.bucket.insert("test_replace", 1)
        except KeyExistsError:
            pass

        with tracer.start_active_span('test'):
            res = self.bucket.replace("test_replace", 2)

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'replace')

    def test_replace_non_existent(self):
        res = None

        try:
            self.bucket.remove("test_replace")
        except NotFoundError:
            pass

        try:
            with tracer.start_active_span('test'):
                res = self.bucket.replace("test_replace", 2)
        except NotFoundError:
            pass

        self.assertIsNone(res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertEqual(cb_span.ec, 1)
        # Just search for the substring of the exception class
        found = cb_span.data["couchbase"]["error"].find("NotFoundError")
        self.assertFalse(found == -1, "Error substring not found.")

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'replace')

    def test_replace_multi(self):
        res = None

        kvs = dict()
        kvs['first_test_replace_multi'] = 1
        kvs['second_test_replace_multi'] = 1

        self.bucket.upsert('first_test_replace_multi', "one")
        self.bucket.upsert('second_test_replace_multi', "two")

        with tracer.start_active_span('test'):
            res = self.bucket.replace_multi(kvs)

        assert (res)
        self.assertTrue(res['first_test_replace_multi'].success)
        self.assertTrue(res['second_test_replace_multi'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'replace_multi')

    def test_append(self):
        self.bucket.upsert("test_append", "one")

        res = None
        with tracer.start_active_span('test'):
            res = self.bucket.append("test_append", "two")

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'append')

    def test_append_multi(self):
        res = None

        kvs = dict()
        kvs['first_test_append_multi'] = "ok1"
        kvs['second_test_append_multi'] = "ok2"

        self.bucket.upsert('first_test_append_multi', "one")
        self.bucket.upsert('second_test_append_multi', "two")

        with tracer.start_active_span('test'):
            res = self.bucket.append_multi(kvs)

        assert (res)
        self.assertTrue(res['first_test_append_multi'].success)
        self.assertTrue(res['second_test_append_multi'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'append_multi')

    def test_prepend(self):
        self.bucket.upsert("test_prepend", "one")

        res = None
        with tracer.start_active_span('test'):
            res = self.bucket.prepend("test_prepend", "two")

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'prepend')

    def test_prepend_multi(self):
        res = None

        kvs = dict()
        kvs['first_test_prepend_multi'] = "ok1"
        kvs['second_test_prepend_multi'] = "ok2"

        self.bucket.upsert('first_test_prepend_multi', "one")
        self.bucket.upsert('second_test_prepend_multi', "two")

        with tracer.start_active_span('test'):
            res = self.bucket.prepend_multi(kvs)

        assert (res)
        self.assertTrue(res['first_test_prepend_multi'].success)
        self.assertTrue(res['second_test_prepend_multi'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'prepend_multi')

    def test_get(self):
        res = None

        with tracer.start_active_span('test'):
            res = self.bucket.get("test-key")

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'get')

    def test_rget(self):
        res = None

        try:
            with tracer.start_active_span('test'):
                res = self.bucket.rget("test-key", replica_index=None)
        except CouchbaseTransientError:
            pass

        self.assertIsNone(res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertEqual(cb_span.ec, 1)
        # Just search for the substring of the exception class
        found = cb_span.data["couchbase"]["error"].find(
            "CouchbaseTransientError")
        self.assertFalse(found == -1, "Error substring not found.")

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'rget')

    def test_get_not_found(self):
        res = None
        try:
            self.bucket.remove('test_get_not_found')
        except NotFoundError:
            pass

        try:
            with tracer.start_active_span('test'):
                res = self.bucket.get("test_get_not_found")
        except NotFoundError:
            pass

        self.assertIsNone(res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertEqual(cb_span.ec, 1)
        # Just search for the substring of the exception class
        found = cb_span.data["couchbase"]["error"].find("NotFoundError")
        self.assertFalse(found == -1, "Error substring not found.")

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'get')

    def test_get_multi(self):
        res = None

        self.bucket.upsert('first_test_get_multi', "one")
        self.bucket.upsert('second_test_get_multi', "two")

        with tracer.start_active_span('test'):
            res = self.bucket.get_multi(
                ['first_test_get_multi', 'second_test_get_multi'])

        assert (res)
        self.assertTrue(res['first_test_get_multi'].success)
        self.assertTrue(res['second_test_get_multi'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'get_multi')

    def test_touch(self):
        res = None
        self.bucket.upsert("test_touch", 1)

        with tracer.start_active_span('test'):
            res = self.bucket.touch("test_touch")

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'touch')

    def test_touch_multi(self):
        res = None

        self.bucket.upsert('first_test_touch_multi', "one")
        self.bucket.upsert('second_test_touch_multi', "two")

        with tracer.start_active_span('test'):
            res = self.bucket.touch_multi(
                ['first_test_touch_multi', 'second_test_touch_multi'])

        assert (res)
        self.assertTrue(res['first_test_touch_multi'].success)
        self.assertTrue(res['second_test_touch_multi'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'touch_multi')

    def test_lock(self):
        res = None
        self.bucket.upsert("test_lock_unlock", "lock_this")

        with tracer.start_active_span('test'):
            rv = self.bucket.lock("test_lock_unlock", ttl=5)
            assert (rv)
            self.assertTrue(rv.success)

            # upsert automatically unlocks the key
            res = self.bucket.upsert("test_lock_unlock", "updated", rv.cas)
            assert (res)
            self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(3, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        filter = lambda span: span.n == "couchbase" and span.data["couchbase"][
            "type"] == "lock"
        cb_lock_span = get_first_span_by_filter(spans, filter)
        assert (cb_lock_span)

        filter = lambda span: span.n == "couchbase" and span.data["couchbase"][
            "type"] == "upsert"
        cb_upsert_span = get_first_span_by_filter(spans, filter)
        assert (cb_upsert_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_lock_span.t)
        self.assertEqual(test_span.t, cb_upsert_span.t)

        self.assertEqual(cb_lock_span.p, test_span.s)
        self.assertEqual(cb_upsert_span.p, test_span.s)

        assert (cb_lock_span.stack)
        self.assertIsNone(cb_lock_span.ec)
        assert (cb_upsert_span.stack)
        self.assertIsNone(cb_upsert_span.ec)

        self.assertEqual(cb_lock_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_lock_span.data["couchbase"]["bucket"],
                         'travel-sample')
        self.assertEqual(cb_lock_span.data["couchbase"]["type"], 'lock')
        self.assertEqual(cb_upsert_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_upsert_span.data["couchbase"]["bucket"],
                         'travel-sample')
        self.assertEqual(cb_upsert_span.data["couchbase"]["type"], 'upsert')

    def test_lock_unlock(self):
        res = None
        self.bucket.upsert("test_lock_unlock", "lock_this")

        with tracer.start_active_span('test'):
            rv = self.bucket.lock("test_lock_unlock", ttl=5)
            assert (rv)
            self.assertTrue(rv.success)

            # explicitly unlock the key using the CAS returned by lock
            res = self.bucket.unlock("test_lock_unlock", rv.cas)
            assert (res)
            self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(3, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        filter = lambda span: span.n == "couchbase" and span.data["couchbase"][
            "type"] == "lock"
        cb_lock_span = get_first_span_by_filter(spans, filter)
        assert (cb_lock_span)

        filter = lambda span: span.n == "couchbase" and span.data["couchbase"][
            "type"] == "unlock"
        cb_unlock_span = get_first_span_by_filter(spans, filter)
        assert (cb_unlock_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_lock_span.t)
        self.assertEqual(test_span.t, cb_unlock_span.t)

        self.assertEqual(cb_lock_span.p, test_span.s)
        self.assertEqual(cb_unlock_span.p, test_span.s)

        assert (cb_lock_span.stack)
        self.assertIsNone(cb_lock_span.ec)
        assert (cb_unlock_span.stack)
        self.assertIsNone(cb_unlock_span.ec)

        self.assertEqual(cb_lock_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_lock_span.data["couchbase"]["bucket"],
                         'travel-sample')
        self.assertEqual(cb_lock_span.data["couchbase"]["type"], 'lock')
        self.assertEqual(cb_unlock_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_unlock_span.data["couchbase"]["bucket"],
                         'travel-sample')
        self.assertEqual(cb_unlock_span.data["couchbase"]["type"], 'unlock')

    def test_lock_unlock_multi(self):
        res = None
        self.bucket.upsert("test_lock_unlock_multi_1", "lock_this")
        self.bucket.upsert("test_lock_unlock_multi_2", "lock_this")

        keys_to_lock = ("test_lock_unlock_multi_1", "test_lock_unlock_multi_2")

        with tracer.start_active_span('test'):
            rv = self.bucket.lock_multi(keys_to_lock, ttl=5)
            assert (rv)
            self.assertTrue(rv['test_lock_unlock_multi_1'].success)
            self.assertTrue(rv['test_lock_unlock_multi_2'].success)

            res = self.bucket.unlock_multi(rv)
            assert (res)

        spans = self.recorder.queued_spans()
        self.assertEqual(3, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        filter = lambda span: span.n == "couchbase" and span.data["couchbase"][
            "type"] == "lock_multi"
        cb_lock_span = get_first_span_by_filter(spans, filter)
        assert (cb_lock_span)

        filter = lambda span: span.n == "couchbase" and span.data["couchbase"][
            "type"] == "unlock_multi"
        cb_unlock_span = get_first_span_by_filter(spans, filter)
        assert (cb_unlock_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_lock_span.t)
        self.assertEqual(test_span.t, cb_unlock_span.t)

        self.assertEqual(cb_lock_span.p, test_span.s)
        self.assertEqual(cb_unlock_span.p, test_span.s)

        assert (cb_lock_span.stack)
        self.assertIsNone(cb_lock_span.ec)
        assert (cb_unlock_span.stack)
        self.assertIsNone(cb_unlock_span.ec)

        self.assertEqual(cb_lock_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_lock_span.data["couchbase"]["bucket"],
                         'travel-sample')
        self.assertEqual(cb_lock_span.data["couchbase"]["type"], 'lock_multi')
        self.assertEqual(cb_unlock_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_unlock_span.data["couchbase"]["bucket"],
                         'travel-sample')
        self.assertEqual(cb_unlock_span.data["couchbase"]["type"],
                         'unlock_multi')

    def test_remove(self):
        res = None
        self.bucket.upsert("test_remove", 1)

        with tracer.start_active_span('test'):
            res = self.bucket.remove("test_remove")

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'remove')

    def test_remove_multi(self):
        res = None
        self.bucket.upsert("test_remove_multi_1", 1)
        self.bucket.upsert("test_remove_multi_2", 1)

        keys_to_remove = ("test_remove_multi_1", "test_remove_multi_2")

        with tracer.start_active_span('test'):
            res = self.bucket.remove_multi(keys_to_remove)

        assert (res)
        self.assertTrue(res['test_remove_multi_1'].success)
        self.assertTrue(res['test_remove_multi_2'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'remove_multi')

    def test_counter(self):
        res = None
        self.bucket.upsert("test_counter", 1)

        with tracer.start_active_span('test'):
            res = self.bucket.counter("test_counter", delta=10)

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'counter')

    def test_counter_multi(self):
        res = None
        self.bucket.upsert("first_test_counter", 1)
        self.bucket.upsert("second_test_counter", 1)

        with tracer.start_active_span('test'):
            res = self.bucket.counter_multi(
                ("first_test_counter", "second_test_counter"))

        assert (res)
        self.assertTrue(res['first_test_counter'].success)
        self.assertTrue(res['second_test_counter'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'counter_multi')

    def test_mutate_in(self):
        res = None
        self.bucket.upsert(
            'king_arthur', {
                'name': 'Arthur',
                'email': '*****@*****.**',
                'interests': ['Holy Grail', 'African Swallows']
            })

        with tracer.start_active_span('test'):
            res = self.bucket.mutate_in(
                'king_arthur', SD.array_addunique('interests', 'Cats'),
                SD.counter('updates', 1))

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'mutate_in')

    def test_lookup_in(self):
        res = None
        self.bucket.upsert(
            'king_arthur', {
                'name': 'Arthur',
                'email': '*****@*****.**',
                'interests': ['Holy Grail', 'African Swallows']
            })

        with tracer.start_active_span('test'):
            res = self.bucket.lookup_in('king_arthur', SD.get('email'),
                                        SD.get('interests'))

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'lookup_in')

    def test_stats(self):
        res = None

        with tracer.start_active_span('test'):
            res = self.bucket.stats()

        assert (res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'stats')

    def test_ping(self):
        res = None

        with tracer.start_active_span('test'):
            res = self.bucket.ping()

        assert (res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'ping')

    def test_diagnostics(self):
        res = None

        with tracer.start_active_span('test'):
            res = self.bucket.diagnostics()

        assert (res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'diagnostics')

    def test_observe(self):
        res = None
        self.bucket.upsert('test_observe', 1)

        with tracer.start_active_span('test'):
            res = self.bucket.observe('test_observe')

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'observe')

    def test_observe_multi(self):
        res = None
        self.bucket.upsert('test_observe_multi_1', 1)
        self.bucket.upsert('test_observe_multi_2', 1)

        keys_to_observe = ('test_observe_multi_1', 'test_observe_multi_2')

        with tracer.start_active_span('test'):
            res = self.bucket.observe_multi(keys_to_observe)

        assert (res)
        self.assertTrue(res['test_observe_multi_1'].success)
        self.assertTrue(res['test_observe_multi_2'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'observe_multi')

    def test_raw_n1ql_query(self):
        res = None

        with tracer.start_active_span('test'):
            res = self.bucket.n1ql_query("SELECT 1")

        assert (res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'n1ql_query')
        self.assertEqual(cb_span.data["couchbase"]["sql"], 'SELECT 1')

    def test_n1ql_query(self):
        res = None

        with tracer.start_active_span('test'):
            res = self.bucket.n1ql_query(
                N1QLQuery(
                    'SELECT name FROM `travel-sample` WHERE brewery_id ="mishawaka_brewing"'
                ))

        assert (res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'n1ql_query')
        self.assertEqual(
            cb_span.data["couchbase"]["sql"],
            'SELECT name FROM `travel-sample` WHERE brewery_id ="mishawaka_brewing"'
        )
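
The assertions above lean on two lookup helpers that are not shown in this snippet; a plausible sketch of their behavior, inferred from how they are called in the tests (span.n carries the span name):

def get_first_span_by_name(spans, name):
    # First recorded span whose name matches, else None (inferred behavior).
    for span in spans:
        if span.n == name:
            return span
    return None

def get_first_span_by_filter(spans, filter_fn):
    # First recorded span satisfying the predicate, else None (inferred behavior).
    for span in spans:
        if filter_fn(span):
            return span
    return None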
Example #9
class SDKClient(object):
    """Python SDK Client Implementation for testrunner - master branch Implementation"""

    def __init__(self, bucket, hosts = ["localhost"] , scheme = "couchbase",
                 ssl_path = None, uhm_options = None, password=None,
                 quiet=True, certpath = None, transcoder = None):
        self.connection_string = \
            self._createString(scheme = scheme, bucket = bucket, hosts = hosts,
                               certpath = certpath, uhm_options = uhm_options)
        self.password = password
        self.quiet = quiet
        self.transcoder = transcoder
        self.default_timeout = 0
        self._createConn()

    def _createString(self, scheme ="couchbase", bucket = None, hosts = ["localhost"], certpath = None, uhm_options = ""):
        connection_string = "{0}://{1}".format(scheme, ", ".join(hosts).replace(" ",""))
        if bucket != None:
            connection_string = "{0}/{1}".format(connection_string, bucket)
        if uhm_options != None:
            connection_string = "{0}?{1}".format(connection_string, uhm_options)
        if scheme == "couchbases":
            if "?" in connection_string:
                connection_string = "{0},certpath={1}".format(connection_string, certpath)
            else:
                connection_string = "{0}?certpath={1}".format(connection_string, certpath)
        return connection_string
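
A quick illustration of the strings _createString builds (host and option values are hypothetical; __new__ is used only to exercise the helper without opening a connection):

client = SDKClient.__new__(SDKClient)  # bypass __init__: no cluster needed here
print(client._createString(scheme="couchbase", bucket="default",
                           hosts=["10.0.0.1", "10.0.0.2"],
                           uhm_options="operation_timeout=30"))
# -> couchbase://10.0.0.1,10.0.0.2/default?operation_timeout=30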

    def _createConn(self):
        try:
            self.cb = CouchbaseBucket(self.connection_string, password=self.password,
                                      quiet=self.quiet, transcoder=self.transcoder)
            self.default_timeout = self.cb.timeout
        except BucketNotFoundError:
            raise

    def reconnect(self):
        self.cb.close()
        self._createConn()

    def close(self):
        self.cb._close()

    def counter_in(self, key, path, delta, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.counter_in(key, path, delta, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def arrayappend_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.arrayappend_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def arrayprepend_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.arrayprepend_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def arrayaddunique_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.arrayaddunique_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def arrayinsert_in(self, key, path, value, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.arrayinsert_in(key, path, value, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def remove_in(self, key, path,  cas=0, ttl=0):
        try:
            self.cb.remove_in(key, path, cas = cas, ttl = ttl)
        except CouchbaseError as e:
            raise

    def mutate_in(self, key, *specs, **kwargs):
        try:
            self.cb.mutate_in(key, *specs, **kwargs)
        except CouchbaseError as e:
            raise

    def lookup_in(self, key, *specs, **kwargs):
        try:
            self.cb.lookup_in(key, *specs, **kwargs)
        except CouchbaseError as e:
            raise

    def get_in(self, key, path):
        try:
            result = self.cb.get_in(key, path)
            return self.__translate_get(result)
        except CouchbaseError as e:
            raise

    def exists_in(self, key, path):
        try:
            self.cb.exists_in(key, path)
        except CouchbaseError as e:
            raise

    def replace_in(self, key, path, value, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.replace_in(key, path, value, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def insert_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.insert_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def upsert_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.upsert_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def append(self, key, value, cas=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.append(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.append(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def append_multi(self, keys, cas=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.append_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.append_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def prepend(self, key, value, cas=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.prepend(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.prepend(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def prepend_multi(self, keys, cas=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.prepend_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.prepend_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def replace(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
           self.cb.replace( key, value, cas=cas, ttl=ttl, format=format,
                                    persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.replace( key, value, cas=cas, ttl=ttl, format=format,
                                    persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def replace_multi(self, keys, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.replace_multi( keys, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.replace_multi( keys, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def cas(self, key, value, cas=0, ttl=0, format=None):
        return self.cb.replace(key, value, cas=cas,format=format)

    def delete(self,key, cas=0, quiet=True, persist_to=0, replicate_to=0):
        self.remove(key, cas=cas, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)

    def remove(self,key, cas=0, quiet=True, persist_to=0, replicate_to=0):
        try:
            return self.cb.remove(key, cas=cas, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                return self.cb.remove(key, cas=cas, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def delete_multi(self, keys, quiet=True, persist_to=0, replicate_to=0):
        # Renamed from a duplicate `delete` definition that shadowed the
        # single-key variant above and passed `self` through as the key.
        return self.remove_multi(keys, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)

    def remove_multi(self, keys, quiet=True, persist_to=0, replicate_to=0):
        try:
            self.cb.remove_multi(keys, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.remove_multi(keys, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def set(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0):
        return self.upsert(key, value, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)

    def upsert(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.upsert(key, value, cas, ttl, format, persist_to, replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.upsert(key, value, cas, ttl, format, persist_to, replicate_to)
            except CouchbaseError as e:
                raise

    def set_multi(self, keys, ttl=0, format=None, persist_to=0, replicate_to=0):
        return self.upsert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)

    def upsert_multi(self, keys, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.upsert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.upsert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def insert(self, key, value, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.insert(key, value, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.insert(key, value, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def insert_multi(self, keys,  ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.insert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.insert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def touch(self, key, ttl = 0):
        try:
            self.cb.touch(key, ttl=ttl)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.touch(key, ttl=ttl)
            except CouchbaseError as e:
                raise

    def touch_multi(self, keys, ttl = 0):
        try:
            self.cb.touch_multi(keys, ttl=ttl)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.touch_multi(keys, ttl=ttl)
            except CouchbaseError as e:
                raise

    def decr(self, key, delta=1, initial=None, ttl=0):
        self.counter(key, delta=-delta, initial=initial, ttl=ttl)

    def decr_multi(self, keys, delta=1, initial=None, ttl=0):
        self.counter_multi(keys, delta=-delta, initial=initial, ttl=ttl)

    def incr(self, key, delta=1, initial=None, ttl=0):
        self.counter(key, delta=delta, initial=initial, ttl=ttl)

    def incr_multi(self, keys, delta=1, initial=None, ttl=0):
        self.counter_multi(keys, delta=delta, initial=initial, ttl=ttl)

    def counter(self, key, delta=1, initial=None, ttl=0):
        try:
            self.cb.counter(key, delta=delta, initial=initial, ttl=ttl)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.counter(key, delta=delta, initial=initial, ttl=ttl)
            except CouchbaseError as e:
                raise

    def counter_multi(self, keys, delta=1, initial=None, ttl=0):
        try:
            self.cb.counter_multi(keys, delta=delta, initial=initial, ttl=ttl)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.counter_multi(keys, delta=delta, initial=initial, ttl=ttl)
            except CouchbaseError as e:
                raise

    def get(self, key, ttl=0, quiet=True, replica=False, no_format=False):
        try:
            rv = self.cb.get(key, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format)
            return self.__translate_get(rv)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                rv = self.cb.get(key, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format)
                return self.__translate_get(rv)
            except CouchbaseError as e:
                raise

    def rget(self, key, replica_index=None, quiet=True):
        try:
            # Delegate to the underlying bucket; the original called self.rget,
            # which recursed infinitely and ignored the quiet argument.
            data = self.cb.rget(key, replica_index=replica_index, quiet=quiet)
            return self.__translate_get(data)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                data = self.cb.rget(key, replica_index=replica_index, quiet=quiet)
                return self.__translate_get(data)
            except CouchbaseError as e:
                raise

    def get_multi(self, keys, ttl=0, quiet=True, replica=False, no_format=False):
        try:
            data = self.cb.get_multi(keys, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format)
            return self.__translate_get_multi(data)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                data = self.cb.get_multi(keys, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format)
                return self.__translate_get_multi(data)
            except CouchbaseError as e:
                raise

    def rget_multi(self, key, replica_index=None, quiet=True):
        try:
            data = self.cb.rget_multi(key, replica_index=replica_index, quiet=quiet)
            return self.__translate_get_multi(data)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                data = self.cb.rget_multi(key, replica_index=replica_index, quiet=quiet)
                return self.__translate_get_multi(data)
            except CouchbaseError as e:
                raise

    def stats(self, keys=None):
        try:
            stat_map = self.cb.stats(keys = keys)
            return stat_map
        except CouchbaseError as e:
            try:
                time.sleep(10)
                return self.cb.stats(keys = keys)
            except CouchbaseError as e:
                raise

    def errors(self, clear_existing=True):
        try:
            rv = self.cb.errors(clear_existing = clear_existing)
            return rv
        except CouchbaseError as e:
            raise

    def observe(self, key, master_only=False):
        try:
            return self.cb.observe(key, master_only = master_only)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                return self.cb.observe(key, master_only = master_only)
            except CouchbaseError as e:
                raise

    def observe_multi(self, keys, master_only=False):
        try:
            data = self.cb.observe_multi(keys, master_only = master_only)
            return self.__translate_observe_multi(data)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                data = self.cb.observe_multi(keys, master_only = master_only)
                return self.__translate_observe_multi(data)
            except CouchbaseError as e:
                raise

    def endure(self, key, persist_to=-1, replicate_to=-1, cas=0, check_removed=False, timeout=5.0, interval=0.010):
        try:
            self.cb.endure(key, persist_to=persist_to, replicate_to=replicate_to,
                           cas=cas, check_removed=check_removed, timeout=timeout, interval=interval)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.endure(key, persist_to=persist_to, replicate_to=replicate_to,
                    cas=cas, check_removed=check_removed, timeout=timeout, interval=interval)
            except CouchbaseError as e:
                raise

    def endure_multi(self, keys, persist_to=-1, replicate_to=-1, cas=0, check_removed=False, timeout=5.0, interval=0.010):
        try:
            self.cb.endure_multi(keys, persist_to=persist_to, replicate_to=replicate_to,
                                 cas=cas, check_removed=check_removed, timeout=timeout, interval=interval)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.endure_multi(keys, persist_to=persist_to, replicate_to=replicate_to,
                                     cas=cas, check_removed=check_removed, timeout=timeout, interval=interval)
            except CouchbaseError as e:
                raise

    def lock(self, key, ttl=0):
        try:
            data = self.cb.lock(key, ttl = ttl)
            return self.__translate_get(data)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                data = self.cb.lock(key, ttl = ttl)
                return self.__translate_get(data)
            except CouchbaseError as e:
                raise

    def lock_multi(self, keys, ttl=0):
        try:
            data = self.cb.lock_multi(keys, ttl = ttl)
            return self.__translate_get_multi(data)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                data = self.cb.lock_multi(keys, ttl = ttl)
                return self.__translate_get_multi(data)
            except CouchbaseError as e:
                raise

    def unlock(self, key, ttl=0):
        try:
            return self.cb.unlock(key)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                return self.cb.unlock(key)
            except CouchbaseError as e:
                raise

    def unlock_multi(self, keys):
        try:
            return self.cb.unlock_multi(keys)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                return self.cb.unlock_multi(keys)
            except CouchbaseError as e:
                raise

    def n1ql_query(self, statement, prepared=False):
        try:
            return N1QLQuery(statement, prepared)
        except CouchbaseError as e:
            raise

    def n1ql_request(self, query):
        try:
            return N1QLRequest(query, self.cb)
        except CouchbaseError as e:
            raise

    def __translate_get_multi(self, data):
        results = {}
        if data is None:
            return results
        for key, result in data.items():
            results[key] = [result.flags, result.cas, result.value]
        return results

    def __translate_get(self, data):
        return data.flags, data.cas, data.value

    def __translate_delete(self, data):
        return data

    def __translate_observe(self, data):
        return data

    def __translate_observe_multi(self, data):
        results = {}
        if data is None:
            return results
        for key, result in data.items():
            results[key] = result.value
        return results

    def __translate_upsert_multi(self, data):
        results = {}
        if data is None:
            return results
        for key, result in data.items():
            results[key] = result
        return results

    def __translate_upsert_op(self, data):
        return data.rc, data.success, data.errstr, data.key
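
A minimal usage sketch for this wrapper (hypothetical host, bucket name and password; assumes a reachable cluster and the 2.x Python SDK that this client targets):

client = SDKClient(bucket='default', hosts=['10.0.0.31'], password='password')
client.upsert('doc_1', {'val': 1})       # retried once after a 10s pause on CouchbaseError
flags, cas, value = client.get('doc_1')  # results are translated to (flags, cas, value)
client.close()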
Example #10
#!/usr/bin/env python
from __future__ import print_function

from couchbase.bucket import Bucket

cb = Bucket('couchbase://10.0.0.31/default')

# First insert the documents we care about
cb.upsert_multi({
    'foo': {'foo': 'value'},
    'bar': {'bar': 'value'},
    'baz': {'baz': 'value'}
})

# Get them back again
rvs = cb.get_multi(['foo', 'bar', 'baz'])
for key, info in rvs.items():
    print('Value for {0}: {1}'.format(key, info.value))

# See other error handling examples showing how to handle errors
# in multi operations
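
For completeness, a minimal sketch of handling a partial failure in a multi operation with the 2.x SDK (hypothetical keys; split_results() on the raised exception separates successful results from failed ones, the same pattern the mirror class further below uses):

from couchbase.bucket import Bucket
from couchbase.exceptions import NotFoundError

cb = Bucket('couchbase://10.0.0.31/default')
try:
    rvs = cb.get_multi(['foo', 'no-such-key'])
except NotFoundError as e:
    ok, bad = e.split_results()
    print('Found: {0}'.format(list(ok.keys())))
    print('Missing: {0}'.format(list(bad.keys())))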
Example #11
class SDKClient(object):
    """Python SDK Client Implementation for testrunner - master branch Implementation"""

    def __init__(self, bucket, hosts = ["localhost"] , scheme = "couchbase",
                 ssl_path = None, uhm_options = None, password=None,
                 quiet=False, certpath = None, transcoder = None):
        self.connection_string = \
            self._createString(scheme = scheme, bucket = bucket, hosts = hosts,
                               certpath = certpath, uhm_options = uhm_options)
        self.password = password
        self.quiet = quiet
        self.transcoder = transcoder
        self._createConn()

    def _createString(self, scheme="couchbase", bucket=None, hosts=["localhost"], certpath=None, uhm_options=None):
        # uhm_options defaults to None (not ""); an empty string would append a bare "?".
        connection_string = "{0}://{1}".format(scheme, ",".join(hosts).replace(" ", ""))
        if bucket is not None:
            connection_string = "{0}/{1}".format(connection_string, bucket)
        if uhm_options is not None:
            connection_string = "{0}?{1}".format(connection_string, uhm_options)
        if scheme == "couchbases":
            if "?" in connection_string:
                connection_string = "{0},certpath={1}".format(connection_string, certpath)
            else:
                connection_string = "{0}?certpath={1}".format(connection_string, certpath)
        return connection_string

    def _createConn(self):
        try:
            self.cb = CouchbaseBucket(self.connection_string, password = self.password,
                                  quiet = self.quiet, transcoder = self.transcoder)
        except BucketNotFoundError as e:
            raise

    def reconnect(self):
        self.cb.close()
        self._createConn()

    def close(self):
        self.cb._close()

    def append(self, key, value, cas=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.append(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def append_multi(self, keys, cas=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.append_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def prepend(self, key, value, cas=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.prepend(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def prepend_multi(self, keys, cas=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.prepend_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def replace(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
           self.cb.replace( key, value, cas=cas, ttl=ttl, format=format,
                                    persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def replace_multi(self, keys, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.replace_multi( keys, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def cas(self, key, value, cas=0, ttl=0, format=None):
        return self.cb.replace(key, value, cas=cas,format=format)

    def delete(self,key, cas=0, quiet=None, persist_to=0, replicate_to=0):
        self.remove(key, cas=cas, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)

    def remove(self,key, cas=0, quiet=None, persist_to=0, replicate_to=0):
        try:
            return self.cb.remove(key, cas=cas, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def delete_multi(self, keys, quiet=None, persist_to=0, replicate_to=0):
        # Renamed from a duplicate `delete` definition that shadowed the
        # single-key variant above and passed `self` through as the key.
        return self.remove_multi(keys, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)

    def remove_multi(self, keys, quiet=None, persist_to=0, replicate_to=0):
        try:
            self.cb.remove_multi(keys, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def set(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0):
        return self.upsert(key, value, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)

    def upsert(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.upsert(key, value, cas, ttl, format, persist_to, replicate_to)
        except CouchbaseError as e:
            raise

    def set_multi(self, keys, ttl=0, format=None, persist_to=0, replicate_to=0):
        return self.upsert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)

    def upsert_multi(self, keys, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.upsert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def insert(self, key, value, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.insert(key, value, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def insert_multi(self, keys,  ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.insert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def touch(self, key, ttl = 0):
        try:
            self.cb.touch(key, ttl=ttl)
        except CouchbaseError as e:
            raise

    def touch_multi(self, keys, ttl = 0):
        try:
            self.cb.touch_multi(keys, ttl=ttl)
        except CouchbaseError as e:
            raise

    def decr(self, key, delta=1, initial=None, ttl=0):
        self.counter(key, delta=-delta, initial=initial, ttl=ttl)


    def decr_multi(self, keys, delta=1, initial=None, ttl=0):
        self.counter_multi(keys, delta=-delta, initial=initial, ttl=ttl)

    def incr(self, key, delta=1, initial=None, ttl=0):
        self.counter(key, delta=delta, initial=initial, ttl=ttl)


    def incr_multi(self, keys, delta=1, initial=None, ttl=0):
        self.counter_multi(keys, delta=delta, initial=initial, ttl=ttl)

    def counter(self, key, delta=1, initial=None, ttl=0):
        try:
            self.cb.counter(key, delta=delta, initial=initial, ttl=ttl)
        except CouchbaseError as e:
            raise

    def counter_multi(self, keys, delta=1, initial=None, ttl=0):
        try:
            self.cb.counter_multi(keys, delta=delta, initial=initial, ttl=ttl)
        except CouchbaseError as e:
            raise

    def get(self, key, ttl=0, quiet=None, replica=False, no_format=False):
        try:
            rv = self.cb.get(key, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format)
            return self.__translate_get(rv)
        except CouchbaseError as e:
            raise

    def rget(self, key, replica_index=None, quiet=None):
        try:
            # Delegate to the underlying bucket; the original called self.rget,
            # which recursed infinitely.
            data = self.cb.rget(key, replica_index=replica_index, quiet=quiet)
            return self.__translate_get(data)
        except CouchbaseError as e:
            raise

    def get_multi(self, keys, ttl=0, quiet=None, replica=False, no_format=False):
        try:
            data = self.cb.get_multi(keys, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format)
            return self.__translate_get_multi(data)
        except CouchbaseError as e:
            raise

    def rget_multi(self, key, replica_index=None, quiet=None):
        try:
            data = self.cb.rget_multi(key, replica_index=replica_index, quiet=quiet)
            return self.__translate_get_multi(data)
        except CouchbaseError as e:
            raise

    def stats(self, keys=None):
        try:
            stat_map = self.cb.stats(keys = keys)
            return stat_map
        except CouchbaseError as e:
            raise

    def errors(self, clear_existing=True):
        try:
            rv = self.cb.errors(clear_existing = clear_existing)
            return rv
        except CouchbaseError as e:
            raise

    def observe(self, key, master_only=False):
        try:
            return self.cb.observe(key, master_only = master_only)
        except CouchbaseError as e:
            raise

    def observe_multi(self, keys, master_only=False):
        try:
            data = self.cb.observe_multi(keys, master_only = master_only)
            return self.__translate_observe_multi(data)
        except CouchbaseError as e:
            raise

    def endure(self, key, persist_to=-1, replicate_to=-1, cas=0, check_removed=False, timeout=5.0, interval=0.010):
        try:
            self.cb.endure(key, persist_to=persist_to, replicate_to=replicate_to,
                           cas=cas, check_removed=check_removed, timeout=timeout, interval=interval)
        except CouchbaseError as e:
            raise

    def endure_multi(self, keys, persist_to=-1, replicate_to=-1, cas=0, check_removed=False, timeout=5.0, interval=0.010):
        try:
            self.cb.endure_multi(keys, persist_to=persist_to, replicate_to=replicate_to,
                                 cas=cas, check_removed=check_removed, timeout=timeout, interval=interval)
        except CouchbaseError as e:
            raise

    def lock(self, key, ttl=0):
        try:
            data = self.cb.lock(key, ttl = ttl)
            return self.__translate_get(data)
        except CouchbaseError as e:
            raise

    def lock_multi(self, keys, ttl=0):
        try:
            data = self.cb.lock_multi(keys, ttl = ttl)
            return self.__translate_get_multi(data)
        except CouchbaseError as e:
            raise

    def unlock(self, key, ttl=0):
        try:
            return self.cb.unlock(key)
        except CouchbaseError as e:
            raise

    def unlock_multi(self, keys):
        try:
            return self.cb.unlock_multi(keys)
        except CouchbaseError as e:
            raise

    def __translate_get_multi(self, data):
        results = {}
        if data is None:
            return results
        for key, result in data.items():
            results[key] = [result.flags, result.cas, result.value]
        return results

    def __translate_get(self, data):
        return data.flags, data.cas, data.value

    def __translate_delete(self, data):
        return data

    def __translate_observe(self, data):
        return data

    def __translate_observe_multi(self, data):
        results = {}
        if data is None:
            return results
        for key, result in data.items():
            results[key] = result.value
        return results

    def __translate_upsert_multi(self, data):
        results = {}
        if data is None:
            return results
        for key, result in data.items():
            results[key] = result
        return results

    def __translate_upsert_op(self, data):
        return data.rc, data.success, data.errstr, data.key


class CouchbaseMemcacheMirror(object):
    def __init__(self, couchbase_uri, memcached_hosts, primary=PRIMARY_COUCHBASE):
        """
        :param couchbase_uri: Connection string for Couchbase
        :param memcached_hosts: List of Memcached nodes
        :param primary: Determines which datastore is authoritative.
            This affects how get operations are performed and which datastore
            is used for CAS operations.
                PRIMARY_COUCHBASE: Couchbase is authoritative
                PRIMARY_MEMCACHED: Memcached is authoritative
            By default, Couchbase is the primary store
        :return:
        """
        self.cb = CbBucket(couchbase_uri)
        self.mc = McClient(memcached_hosts)
        self._primary = primary

    @property
    def primary(self):
        return self._primary

    def _cb_get(self, key):
        try:
            return self.cb.get(key).value
        except NotFoundError:
            return None

    def get(self, key, try_alternate=True):
        """
        Gets a document
        :param key: The key to retrieve
        :param try_alternate: Whether to try the secondary data source if the
            item is not found in the primary.
        :return: The value as a Python object
        """
        if self._primary == PRIMARY_COUCHBASE:
            order = [self._cb_get, self.mc.get]
        else:
            order = [self.mc.get, self._cb_get]

        for meth in order:
            ret = meth(key)
            if ret or not try_alternate:
                return ret

        return None

    def _cb_mget(self, keys):
        """
        Internal method to execute a Couchbase multi-get
        :param keys: The keys to retrieve
        :return: A tuple of {found_key:found_value, ...}, [missing_key1,...]
        """
        try:
            ok_rvs = self.cb.get_multi(keys)
            bad_rvs = {}
        except NotFoundError as e:
            ok_rvs, bad_rvs = e.split_results()

        ok_dict = {k: (v.value, v.cas) for k, v in ok_rvs.items()}
        return ok_dict, bad_rvs.keys()

    def get_multi(self, keys, try_alternate=True):
        """
        Gets multiple items from the server
        :param keys: The keys to fetch as an iterable
        :param try_alternate: Whether to fetch missing items from alternate store
        :return: A dictionary of key:value. Only contains keys which exist and have values
        """
        if self._primary == PRIMARY_COUCHBASE:
            ok, err = self._cb_mget(keys)
            if err and try_alternate:
                ok.update(self.mc.get_many(err))
            return ok
        else:
            ok = self.mc.get_many(keys)
            if len(ok) < len(keys) and try_alternate:
                keys_err = set(keys) - set(ok)
                ok.update(self._cb_mget(list(keys_err))[0])
            return ok

    def gets(self, key):
        """
        Get an item with its CAS. The item will always be fetched from the primary
        data store.

        :param key: the key to get
        :return: the value of the key, or None if no such value
        """
        if self._primary == PRIMARY_COUCHBASE:
            try:
                rv = self.cb.get(key)
                return rv.value, rv.cas
            except NotFoundError:
                return None, None
        else:
            return self.mc.gets(key)

    def gets_multi(self, keys):
        if self._primary == PRIMARY_COUCHBASE:
            try:
                rvs = self.cb.get_multi(keys)
            except NotFoundError as e:
                rvs, _ = e.split_results()

            return {k: (v.value, v.cas) for k, v in rvs.items()}
        else:
            # TODO: I'm not sure if this is implemented in HasClient :(
            return self.mc.gets_many(keys)

    def delete(self, key):
        st = Status()
        try:
            self.cb.remove(key)
        except NotFoundError as e:
            st.cb_error = e

        st.mc_status = self.mc.delete(key)
        return st

    def delete_multi(self, keys):
        st = Status()
        try:
            self.cb.remove_multi(keys)
        except NotFoundError as e:
            st.cb_error = e

        st.mc_status = self.mc.delete_many(keys)
        return st

    def _do_incrdecr(self, key, value, is_incr):
        cb_value = value if is_incr else -value
        mc_meth = self.mc.incr if is_incr else self.mc.decr
        st = Status()
        try:
            self.cb.counter(key, delta=cb_value)
        except NotFoundError as e:
            st.cb_error = e

        st.mc_status = mc_meth(key, value)
        return st

    def incr(self, key, value):
        return self._do_incrdecr(key, value, True)

    def decr(self, key, value):
        return self._do_incrdecr(key, value, False)

    def touch(self, key, expire=0):
        st = Status()
        try:
            self.cb.touch(key, ttl=expire)
        except NotFoundError as e:
            st.cb_error = e

        st.mc_status = self.mc.touch(key, expire=expire)
        return st

    def set(self, key, value, expire=0):
        """
        Write first to Couchbase, and then to Memcached
        :param key: Key to use
        :param value: Value to use
        :param expire: If set, the item will expire in the given amount of time
        :return: Status object if successful (will always be success).
                 on failure an exception is raised
        """
        self.cb.upsert(key, value, ttl=expire)
        self.mc.set(key, value, expire=expire)
        return Status()

    def set_multi(self, values, expire=0):
        """
        Set multiple items.
        :param values: A dictionary of key, value indicating values to store
        :param expire: If present, expiration time for all the items
        :return:
        """
        self.cb.upsert_multi(values, ttl=expire)
        self.mc.set_many(values, expire=expire)
        return Status()

    def replace(self, key, value, expire=0):
        """
        Replace existing items
        :param key: key to replace
        :param value: new value
        :param expire: expiration for item
        :return: Status object. Will be OK
        """
        status = Status()
        try:
            self.cb.replace(key, value, ttl=expire)
        except NotFoundError as e:
            status.cb_error = e

        status.mc_status = self.mc.replace(key, value, expire=expire)
        return status

    def add(self, key, value, expire=0):
        status = Status()
        try:
            self.cb.insert(key, value, ttl=expire)
        except KeyExistsError as e:
            status.cb_error = e

        status.mc_status = self.mc.add(key, value, expire=expire)
        return status

    def _append_prepend(self, key, value, is_append):
        cb_meth = self.cb.append if is_append else self.cb.prepend
        mc_meth = self.mc.append if is_append else self.mc.prepend
        st = Status()

        try:
            cb_meth(key, value, format=FMT_UTF8)
        except (NotStoredError, NotFoundError) as e:
            st.cb_error = e

        st.mc_status = mc_meth(key, value)

    def append(self, key, value):
        return self._append_prepend(key, value, True)

    def prepend(self, key, value):
        return self._append_prepend(key, value, False)

    def cas(self, key, value, cas, expire=0):
        if self._primary == PRIMARY_COUCHBASE:
            try:
                self.cb.replace(key, value, cas=cas, ttl=expire)
                self.mc.set(key, value, expire=expire)
                return True
            except KeyExistsError:
                return False
            except NotFoundError:
                return None
        else:
            return self.mc.cas(key, value, cas)
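
A minimal usage sketch for the mirror (hypothetical endpoints; assumes the PRIMARY_COUCHBASE constant and the CbBucket/McClient aliases this class depends on are importable from the surrounding module, and that McClient accepts a list of (host, port) pairs):

mirror = CouchbaseMemcacheMirror('couchbase://10.0.0.31/default',
                                 [('10.0.0.32', 11211)],
                                 primary=PRIMARY_COUCHBASE)
mirror.set('greeting', 'hello')  # written to Couchbase first, then Memcached
value = mirror.get('greeting')   # read from the primary, falling back to the alternate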
Example #13
def test_upgrade(params_from_base_test_setup):
    """
    @summary
        The initial versions of SG and CBS has already been provisioned at this point
        We have to upgrade them to the upgraded versions
    """
    cluster_config = params_from_base_test_setup['cluster_config']
    mode = params_from_base_test_setup['mode']
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']
    ls_url = params_from_base_test_setup["ls_url"]
    server_version = params_from_base_test_setup['server_version']
    sync_gateway_version = params_from_base_test_setup['sync_gateway_version']
    server_upgraded_version = params_from_base_test_setup[
        'server_upgraded_version']
    sync_gateway_upgraded_version = params_from_base_test_setup[
        'sync_gateway_upgraded_version']
    sg_url = params_from_base_test_setup['sg_url']
    sg_admin_url = params_from_base_test_setup['sg_admin_url']
    num_docs = int(params_from_base_test_setup['num_docs'])
    cbs_platform = params_from_base_test_setup['cbs_platform']
    cbs_toy_build = params_from_base_test_setup['cbs_toy_build']
    sg_conf = "{}/resources/sync_gateway_configs/sync_gateway_default_functional_tests_{}.json".format(
        os.getcwd(), mode)

    # Add data to liteserv
    client = MobileRestClient()
    log_info("ls_url: {}".format(ls_url))
    ls_db = client.create_database(ls_url, name="ls_db")

    # Create user and session on SG
    sg_user_channels = ["sg_user_channel"]
    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    client.create_user(url=sg_admin_url,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url,
                                       db=sg_db,
                                       name=sg_user_name,
                                       password=sg_user_password)

    log_info(
        "Starting continuous push pull replication from liteserv to sync gateway"
    )
    repl_one = client.start_replication(url=ls_url,
                                        continuous=True,
                                        from_db=ls_db,
                                        to_url=sg_url,
                                        to_db=sg_db,
                                        to_auth=sg_session)
    client.wait_for_replication_status_idle(ls_url, repl_one)

    log_info("Starting replication from sync gateway to liteserv")
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_url,
                             from_db=sg_db,
                             from_auth=sg_session,
                             to_db=ls_db)

    # Add docs to liteserv
    added_docs = add_docs_to_client_task(client=client,
                                         url=ls_url,
                                         db=ls_db,
                                         channels=sg_user_channels,
                                         num_docs=num_docs)
    log_info("Added {} docs".format(len(added_docs)))

    # start updating docs
    terminator_doc_id = 'terminator'
    with ProcessPoolExecutor() as up:
        # Start updates in background process
        updates_future = up.submit(update_docs, client, ls_url, ls_db,
                                   added_docs, sg_session, terminator_doc_id)

        # Supported upgrade process
        # 1. Upgrade the SGs first (docmeta -> docmeta) - CBS 5.0.0 does not support TAP.
        # 2. Upgrade the CBS cluster.
        # 3. Enable import/xattrs on SGs

        # Upgrade SG docmeta -> docmeta
        cluster_util = ClusterKeywords()
        topology = cluster_util.get_cluster_topology(cluster_config,
                                                     lb_enable=False)
        sync_gateways = topology["sync_gateways"]
        sg_accels = topology["sg_accels"]

        upgrade_sync_gateway(sync_gateways, sync_gateway_version,
                             sync_gateway_upgraded_version, sg_conf,
                             cluster_config)

        if mode == "di":
            upgrade_sg_accel(sg_accels, sync_gateway_version,
                             sync_gateway_upgraded_version, sg_conf,
                             cluster_config)

        # Upgrade CBS
        cluster = Cluster(config=cluster_config)
        if len(cluster.servers) < 3:
            raise Exception("Please provide at least 3 servers")

        server_urls = []
        for server in cluster.servers:
            server_urls.append(server.url)

        primary_server = cluster.servers[0]
        secondary_server = cluster.servers[1]
        servers = cluster.servers[1:]

        upgrade_server_cluster(servers,
                               primary_server,
                               secondary_server,
                               server_version,
                               server_upgraded_version,
                               server_urls,
                               cluster_config,
                               cbs_platform,
                               toy_build=cbs_toy_build)

        # Restart SGs after the server upgrade
        sg_obj = SyncGateway()
        for sg in sync_gateways:
            sg_ip = host_for_url(sg["admin"])
            log_info("Restarting sync gateway {}".format(sg_ip))
            sg_obj.restart_sync_gateways(cluster_config=cluster_config,
                                         url=sg_ip)
            time.sleep(5)

        if mode == "di":
            ac_obj = SyncGateway()
            for ac in sg_accels:
                ac_ip = host_for_url(ac)
                log_info("Restarting sg accel {}".format(ac_ip))
                ac_obj.restart_sync_gateways(cluster_config=cluster_config,
                                             url=ac_ip)
                time.sleep(5)

        if xattrs_enabled:
            # Enable xattrs on all SG/SGAccel nodes
            # cc - Start 1 SG with import enabled, all with XATTRs enabled
            # di - All SGs/SGAccels with xattrs enabled - this will also enable import on SGAccel
            #    - Do not enable import in SG.
            if mode == "cc":
                enable_import = True
            elif mode == "di":
                enable_import = False

            if mode == "di":
                ac_obj = SyncGateway()
                for ac in sg_accels:
                    ac_ip = host_for_url(ac)
                    ac_obj.enable_import_xattrs(cluster_config=cluster_config,
                                                sg_conf=sg_conf,
                                                url=ac_ip,
                                                enable_import=False)

            sg_obj = SyncGateway()
            for sg in sync_gateways:
                sg_ip = host_for_url(sg["admin"])
                sg_obj.enable_import_xattrs(cluster_config=cluster_config,
                                            sg_conf=sg_conf,
                                            url=sg_ip,
                                            enable_import=enable_import)
                enable_import = False
                # Check Import showing up on all nodes

        send_changes_termination_doc(auth=sg_session,
                                     terminator_doc_id=terminator_doc_id,
                                     terminator_channel=sg_user_channels,
                                     ls_url=ls_url,
                                     ls_db=ls_db)
        log_info("Waiting for doc updates to complete")
        updated_doc_revs = updates_future.result()

        log_info("Stopping replication from liteserv to sync gateway")
        # Stop repl_one
        client.stop_replication(url=ls_url,
                                continuous=True,
                                from_db=ls_db,
                                to_url=sg_url,
                                to_db=sg_db,
                                to_auth=sg_session)

        log_info("Stopping replication from sync gateway to liteserv")
        # Stop repl_two
        client.stop_replication(url=ls_url,
                                continuous=True,
                                from_url=sg_url,
                                from_db=sg_db,
                                from_auth=sg_session,
                                to_db=ls_db)
        # Gather the new revs for verification
        log_info("Gathering the updated revs for verification")
        doc_ids = []
        for i in range(len(added_docs)):
            doc_ids.append(added_docs[i]["id"])
            if added_docs[i]["id"] in updated_doc_revs:
                added_docs[i]["rev"] = updated_doc_revs[added_docs[i]["id"]]

        # Verify rev, doc body and revision history of all docs
        verify_sg_docs_revision_history(url=sg_admin_url,
                                        db=sg_db,
                                        added_docs=added_docs)

        if xattrs_enabled:
            # Verify through SDK that there is no _sync property in the doc body
            bucket_name = 'data-bucket'
            sdk_client = Bucket('couchbase://{}/{}'.format(
                primary_server.host, bucket_name),
                                password='******',
                                timeout=SDK_TIMEOUT)
            log_info("Fetching docs from SDK")
            docs_from_sdk = sdk_client.get_multi(doc_ids)

            log_info("Verifying that there is no _sync property in the docs")
            for i in docs_from_sdk:
                if "_sync" in docs_from_sdk[i].value:
                    raise Exception(
                        "_sync section found in docs after upgrade")
Example #14
def test_document_resurrection(params_from_base_test_setup, sg_conf_name, deletion_type):
    """
    Scenarios:

    Doc meta mode / tombstone
    - Create docs (set A) via Sync Gateway
    - Delete docs (set A) via Sync Gateway
    - Verify docs (set A) are deleted via Sync Gateway
    - Create docs (set A) via Sync Gateway
    - Verify revs (set A) are generation 3 via Sync Gateway

    Doc meta mode / purge
    - Create docs (set A) via Sync Gateway
    - Purge docs (set A) via Sync Gateway
    - Verify docs (set A) are deleted via Sync Gateway
    - Create docs (set A) via Sync Gateway
    - Verify revs (set A) are generation 1 via Sync Gateway

    XATTRs / tombstone
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Delete SDK docs (set B) via Sync Gateway
    - Delete SG docs (set A) via SDK
    - Verify docs (set B) are deleted via Sync Gateway
    - Verify docs (set B) are deleted via SDK
    - Verify docs (set A) are deleted via Sync Gateway
    - Verify docs (set A) are deleted via SDK
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Verify revs (set A, B) are generation 3 via Sync Gateway

    XATTRs / purge
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Purge SDK docs (set B) via Sync Gateway
    - Delete SG docs (set A) via SDK
    - Verify docs (set B) are deleted via Sync Gateway
    - Verify docs (set B) are deleted via SDK
    - Verify docs (set A) are deleted via Sync Gateway
    - Verify docs (set A) are deleted via SDK
    - Create docs (set A) via Sync Gateway
    - Create docs (set B) via SDK
    - Verify revs (set A, B) are generation 1 via Sync Gateway

    """
    cluster_conf = params_from_base_test_setup['cluster_config']
    cluster_topology = params_from_base_test_setup['cluster_topology']
    mode = params_from_base_test_setup['mode']
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    cbs_url = cluster_topology['couchbase_servers'][0]
    sg_admin_url = cluster_topology['sync_gateways'][0]['admin']
    sg_url = cluster_topology['sync_gateways'][0]['public']

    bucket_name = 'data-bucket'
    sg_db = 'db'
    cbs_host = host_for_url(cbs_url)

    num_docs_per_client = 10

    # Reset cluster
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Initialize clients
    sg_client = MobileRestClient()
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_host, bucket_name), password='******')

    # Create Sync Gateway user
    sg_user_channels = ['NASA', 'NATGEO']
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='seth', password='******', channels=sg_user_channels)
    sg_user_auth = sg_client.create_session(url=sg_admin_url, db=sg_db, name='seth', password='******')

    # Create / Add docs from SG
    sg_doc_bodies = document.create_docs(
        doc_id_prefix='sg_doc',
        number=num_docs_per_client,
        content={'foo': 'bar'},
        channels=sg_user_channels,
        attachments_generator=attachment.generate_2_png_10_10
    )
    sg_doc_ids = [doc['_id'] for doc in sg_doc_bodies]

    sg_bulk_docs_resp = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sg_doc_bodies, auth=sg_user_auth)
    assert len(sg_bulk_docs_resp) == num_docs_per_client

    all_doc_ids = sg_doc_ids
    assert len(all_doc_ids) == num_docs_per_client

    if xattrs_enabled:
        #  Create / Add docs from sdk
        log_info('Adding docs via SDK')
        sdk_doc_bodies = document.create_docs(
            doc_id_prefix='sdk_doc',
            number=num_docs_per_client,
            content={'foo': 'bar'},
            channels=sg_user_channels,
        )
        sdk_docs = {doc['_id']: doc for doc in sdk_doc_bodies}
        sdk_doc_ids = [doc['_id'] for doc in sdk_doc_bodies]

        log_info('Creating SDK docs')
        sdk_client.upsert_multi(sdk_docs)

        all_doc_ids = sg_doc_ids + sdk_doc_ids
        assert len(all_doc_ids) == num_docs_per_client * 2

    if deletion_type == 'tombstone':
        # Set the target docs.
        # Doc meta mode: Delete Sync Gateway docs via Sync Gateway
        # XATTR mode: Delete SDK docs via Sync Gateway
        sg_doc_ids_to_delete = sg_doc_ids
        if xattrs_enabled:
            sg_doc_ids_to_delete = sdk_doc_ids

        # SG delete target docs
        for doc_id in sg_doc_ids_to_delete:
            doc = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id, auth=sg_user_auth)
            deleted = sg_client.delete_doc(url=sg_url, db=sg_db, doc_id=doc_id, rev=doc['_rev'], auth=sg_user_auth)
            log_info(deleted)

        if xattrs_enabled:
            log_info('Deleting SG docs via SDK')
            sdk_client.remove_multi(sg_doc_ids)

    elif deletion_type == 'purge':
        # SG Purge all docs
        all_docs, errors = sg_client.get_bulk_docs(url=sg_url, db=sg_db, doc_ids=all_doc_ids, auth=sg_user_auth)
        if xattrs_enabled:
            assert len(all_docs) == num_docs_per_client * 2
            assert len(errors) == 0
        else:
            assert len(all_docs) == num_docs_per_client
            assert len(errors) == 0
        log_info('Purging docs via Sync Gateway')
        sg_client.purge_docs(url=sg_admin_url, db=sg_db, docs=all_docs)

    else:
        raise ValueError('Invalid test parameters')

    # Verify deletes via Sync Gateway
    deleted_docs_to_verify = sg_doc_ids
    assert len(deleted_docs_to_verify) == num_docs_per_client

    # If running in xattr mode, make sure to verify SG + SDK docs
    if xattrs_enabled:
        deleted_docs_to_verify = sg_doc_ids + sdk_doc_ids
        assert len(deleted_docs_to_verify) == num_docs_per_client * 2

    if xattrs_enabled and deletion_type == 'tombstone':

        # Verify SDK + SG docs are deleted from Sync Gateway
        verify_sg_deletes(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)

        # Verify SDK + SG docs are deleted from SDK
        verify_sdk_deletes(sdk_client, deleted_docs_to_verify)

    elif xattrs_enabled and deletion_type == 'purge':

        # Verify SDK + SG docs are purged from Sync Gateway
        verify_sg_purges(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)

        # Verify SDK + SG docs are deleted from SDK
        verify_sdk_deletes(sdk_client, deleted_docs_to_verify)

    elif not xattrs_enabled and deletion_type == 'tombstone':

        # Doc meta: Verify SG docs are all deleted via SG
        verify_sg_deletes(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)

    elif not xattrs_enabled and deletion_type == 'purge':

        # Doc meta: Verify SG docs are all deleted via SG
        verify_sg_purges(sg_client, sg_url, sg_db, deleted_docs_to_verify, sg_user_auth)

    else:
        raise ValueError('Invalid test parameters')

    # Recreate deleted docs from Sync Gateway
    sg_bulk_docs_resp = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sg_doc_bodies, auth=sg_user_auth)
    assert len(sg_bulk_docs_resp) == num_docs_per_client

    if xattrs_enabled:
        log_info('Recreating SDK docs')
        # Recreate deleted docs from SDK
        sdk_client.upsert_multi(sdk_docs)

    # Get docs via Sync Gateway
    doc_ids_to_get = sg_doc_ids
    if xattrs_enabled:
        doc_ids_to_get = sg_doc_ids + sdk_doc_ids
    docs, errors = sg_client.get_bulk_docs(
        url=sg_url,
        db=sg_db,
        doc_ids=doc_ids_to_get,
        auth=sg_user_auth,
        validate=False
    )
    if xattrs_enabled:
        assert len(docs) == num_docs_per_client * 2
        assert len(errors) == 0
    else:
        assert len(docs) == num_docs_per_client
        assert len(errors) == 0

    if xattrs_enabled:

        # Get SDK docs and make sure all docs were recreated
        all_docs_from_sdk = sdk_client.get_multi(doc_ids_to_get)
        assert len(all_docs_from_sdk) == num_docs_per_client * 2
        log_info('Found: {} recreated docs via SDK'.format(len(all_docs_from_sdk)))

        # Make sure we are able to get recreated docs via SDK
        doc_ids_to_get_scratch = list(doc_ids_to_get)
        assert len(doc_ids_to_get_scratch) == num_docs_per_client * 2
        for doc_id in all_docs_from_sdk:
            doc_ids_to_get_scratch.remove(doc_id)
        assert len(doc_ids_to_get_scratch) == 0

    # Make sure we are able to get recreated docs via SDK
    doc_ids_to_get_scratch = list(doc_ids_to_get)
    if xattrs_enabled:
        # SG + SDK docs
        assert len(doc_ids_to_get_scratch) == num_docs_per_client * 2
    else:
        # SG docs
        assert len(doc_ids_to_get_scratch) == num_docs_per_client

    for doc in docs:
        # Verify expected document revisions
        if xattrs_enabled:
            if deletion_type == 'purge':
                # SG purges SG docs and recreates them, expecting 1- rev
                # SDK removes SDK docs and recreates them, expecting 1- rev
                assert doc['_rev'].startswith('1-')
            else:
                # SG tombstones SG docs and recreates them, expecting 3- rev
                # SDK removes SDK docs and recreates them, expecting 1- rev
                if doc['_id'].startswith('sg_'):
                    assert doc['_rev'].startswith('3-')
                else:
                    assert doc['_rev'].startswith('1-')
        else:
            if deletion_type == 'purge':
                # SG purges SG docs and recreates them, expecting 1- rev
                assert doc['_rev'].startswith('1-')
            else:
                # SG tombstones SG docs and recreates them, expecting 3- rev
                assert doc['_rev'].startswith('3-')

        doc_ids_to_get_scratch.remove(doc['_id'])

    # Make sure all docs were found
    assert len(doc_ids_to_get_scratch) == 0
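

# For reference, a minimal sketch (hypothetical, for illustration only) of what
# a verify_sg_deletes-style helper could look like, assuming
# MobileRestClient.get_doc raises requests' HTTPError with a 404 status for
# deleted or purged docs. The real helpers used above may differ.
from requests.exceptions import HTTPError


def verify_sg_deletes_sketch(client, url, db, docs_to_verify, auth):
    for doc_id in docs_to_verify:
        try:
            client.get_doc(url=url, db=db, doc_id=doc_id, auth=auth)
            raise AssertionError('{} should be deleted'.format(doc_id))
        except HTTPError as he:
            # Sync Gateway returns 404 for both tombstoned and purged docs
            assert he.response.status_code == 404


# The test below presumably runs under pytest parametrization that supplies
# sg_conf_name, num_of_docs and revs_limit (decorators are not shown in this
# listing).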
def test_concurrent_updates_no_conflicts(params_from_base_test_setup, sg_conf_name, num_of_docs, revs_limit):
    """@summary Test with concurrent updates with no conflicts enabled
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #15
    Steps:
    1. Start sg with some revs_limit specified
    2. Add docs to SG.
    3. Update docs few times via sg .
    4. Update docs few times vis sdk concurrently with sg.
        -> There are chances of getting conflict errors on both, handled the error appropriately
    5. update docs few number of times.
    6. Verify it can maintain default revisions.
    7. Verify previous revisions deleted and revisions maintained based on revs_limit
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    if revs_limit is None:
        revs_limit = 1000
    additional_updates = revs_limit
    total_updates = revs_limit + additional_updates
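    # After revs_limit + additional_updates updates, only the newest
    # revs_limit revisions should remain in each doc's history; the first
    # additional_updates revisions should have been pruned.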
    if not no_conflicts_enabled:
        pytest.skip('--no-conflicts is not enabled, so skipping the test')
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    # 1. Start sg
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')

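    # Persist revs_limit into a temporary cluster config and restart
    # Sync Gateway so the new limit takes effect.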
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Sync Gateway did not restart after no-conflicts was enabled"
    # End of setup

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix='sg_docs', number=num_of_docs,
                                        attachments_generator=attachment.generate_2_png_10_10, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # Connect to server via SDK
    log_info('Connecting to bucket ...')
    bucket_name = 'data-bucket'
    cbs_url = topology['couchbase_servers'][0]
    cbs_ip = host_for_url(cbs_url)
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name), password='******', timeout=SDK_TIMEOUT)
    sg_doc_ids = [doc['id'] for doc in sg_docs]
    sdk_docs_resp = sdk_client.get_multi(sg_doc_ids)
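    # get_multi returns a dict-like result keyed by doc id; presumably
    # sdk_bulk_update uses these current values/CAS when applying its updates.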

    # Update the same documents concurrently from a Sync Gateway client and an SDK client
    with ThreadPoolExecutor(max_workers=9) as tpe:

        update_from_sdk_task = tpe.submit(sdk_bulk_update, sdk_client, sdk_docs_resp, 10)
        update_from_sg_task = tpe.submit(sg_doc_updates, sg_client, sg_url=sg_url, sg_db=sg_db, sg_docs=sg_docs, number_updates=10,
                                         auth=autouser_session, channels=channels)

        update_from_sg_task.result()
        update_from_sdk_task.result()
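        # result() blocks until each task completes and re-raises any
        # exception (e.g. a conflict error) from the worker thread.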

    # 5. Update the docs a few more times via SG
    prev_revs = []
    for i in xrange(total_updates):
        update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None,
                                               auth=autouser_session, channels=channels)
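        # A revision id has the form "<generation>-<digest>"; keep each
        # intermediate digest so we can later assert it was pruned.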
        rev = update_sg_docs[0]['rev'].split('-')[1]
        prev_revs.append(rev)

    # 6. Verify the default number of revisions is maintained.
    # 7. Verify previous revisions were deleted.
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history does not match the revs_limit set in the SG config"
        for i in xrange(additional_updates):
            assert prev_revs[i] not in num_of_revs