Example #1
File: views.py Project: dmitrof/nihongo
def confirm_ir(request, ir_id, group_id, user_id):
    print("IR CONFIRMATION")
    c = Bucket('couchbase://localhost/nihongo')
    user_doc = c.get(user_id).value
    password = user_doc['password']
    ir_doc = c.get(ir_id).value


    if 'accept' in request.POST:
        print("IR ACCEPTED")
        ir_doc['confirmed'] = 'accepted'
        sync_user = SyncGateway.get_user(user_id)
        new_sync_user = {}
        admin_channels = sync_user['admin_channels']
        #all_channels = sync_user['all_channels']
        admin_channels.append(group_id)

        SyncGateway.put_user(sync_user['name'], '*****@*****.**', password, admin_channels)
        print(sync_user)

    elif 'decline' in request.POST:
        ir_doc['confirmed'] = 'declined'
        print("IR DECLINED")
    c.upsert(ir_id, ir_doc)
    return HttpResponseRedirect(reverse('tutor:tutor_groups'))
Example #2
File: views.py Project: dmitrof/nihongo
    def post(self, request, group_id, deck_id):
        c = Bucket('couchbase://localhost/nihongo')
        success = 'dunno'
        print('deleting deck')
        try:
            c.delete(deck_id)
            group = c.get(group_id).value
            print(group.get('decks_list'))
            group.get('decks_list').remove(deck_id)
            c.upsert(group_id, group)
            success = 'success'
        except (BaseException, CouchbaseError) as e:
            success = 'error'
            print(e)

        group = c.get(group_id).value
        group_decks = group.get('decks_list')
        decks_list = []
        for d in group_decks:
            try:
                deck = c.get(d)
                decks_list.append(deck)
            except CouchbaseError:
                pass
        return HttpResponseRedirect(reverse('tutor:group_decks', kwargs={'group_id' : group_id}))
Example #3
File: views.py Project: dmitrof/nihongo
    def post(self, request, group_id):
        c = Bucket('couchbase://localhost/nihongo')
        success = 'dunno'
        constgroup = group_id.rsplit('_', 1)[0]
        print(constgroup)
        print('adding new deck')
        try:
            description = request.POST['description']
            print(description)
            ckey = 'deck_' + str(uuid4()).replace('-', '_')

            newdeck = {'doc_type' : 'deck', 'description' : description, 'deck_name' : description}
            newdeck['cards_list'] = []
            newdeck['doc_channels'] = [group_id]
            c.insert(ckey, newdeck)
            group = c.get(group_id).value
            print(group.get('decks_list'))
            group.get('decks_list').append(ckey)
            c.upsert(group_id, group)
            success = 'success'
        except (BaseException, CouchbaseError) as e:
            success = 'error'
            print(e)

        group = c.get(group_id).value
        group_decks = group.get('decks_list')
        decks_list = []
        for d in group_decks:
            try:
                deck = c.get(d)
                decks_list.append(deck)
            except CouchbaseError:
                pass
        return HttpResponseRedirect(reverse('tutor:group_decks', kwargs={'group_id' : group_id}))
Example #4
 def check_dataloss(self, server, bucket):
     from couchbase.bucket import Bucket
     from couchbase.exceptions import NotFoundError
     from lib.memcached.helper.data_helper import VBucketAwareMemcached
     bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name), username="******", password="******")
     rest = RestConnection(self.master)
     VBucketAware = VBucketAwareMemcached(rest, bucket.name)
     _, _, _ = VBucketAware.request_map(rest, bucket.name)
     batch_start = 0
     batch_end = 0
     batch_size = 10000
     errors = []
     while self.num_items > batch_end:
         batch_end = batch_start + batch_size
         keys = []
         for i in xrange(batch_start, batch_end, 1):
             keys.append(str(i).rjust(20, '0'))
         try:
             bkt.get_multi(keys)
             self.log.info("Able to fetch keys starting from {0} to {1}".format(keys[0], keys[len(keys)-1]))
         except Exception as e:
             self.log.error(e)
             self.log.info("Now trying keys in the batch one at a time...")
             key = ''
             try:
                 for key in keys:
                     bkt.get(key)
             except NotFoundError:
                 vBucketId = VBucketAware._get_vBucket_id(key)
                 errors.append("Missing key: {0}, VBucketId: {1}".
                               format(key, vBucketId))
         batch_start += batch_size
     return errors
Example #5
 def check_dataloss(self, server, bucket):
     from couchbase.bucket import Bucket
     from couchbase.exceptions import NotFoundError
     from lib.memcached.helper.data_helper import VBucketAwareMemcached
     bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name))
     rest = RestConnection(self.master)
     VBucketAware = VBucketAwareMemcached(rest, bucket.name)
     _, _, _ = VBucketAware.request_map(rest, bucket.name)
     batch_start = 0
     batch_end = 0
     batch_size = 10000
     errors = []
     while self.num_items > batch_end:
         batch_end = batch_start + batch_size
         keys = []
         for i in xrange(batch_start, batch_end, 1):
             keys.append(str(i).rjust(20, '0'))
         try:
             bkt.get_multi(keys)
             self.log.info("Able to fetch keys starting from {0} to {1}".format(keys[0], keys[len(keys)-1]))
         except Exception as e:
             self.log.error(e)
             self.log.info("Now trying keys in the batch one at a time...")
             key = ''
             try:
                 for key in keys:
                     bkt.get(key)
             except NotFoundError:
                 vBucketId = VBucketAware._get_vBucket_id(key)
                 errors.append("Missing key: {0}, VBucketId: {1}".
                               format(key, vBucketId))
         batch_start += batch_size
     return errors
Example #6
def purge_job_details(doc_id, type, olderBuild=False):
    client = Bucket(HOST + '/' + type)
    build_client = Bucket(HOST + '/' + 'builds')
    try:
        job = client.get(doc_id).value
        if 'build' not in job:
            return
        build = job['build']
        build_document = build_client.get(build).value
        os = job['os']
        name = job['name']
        build_id = job['build_id']
        component = job['component']
        if (build_document['os'][os][component].__len__() == 0
                or name not in build_document['os'][os][component]):
            return
        to_del_job = [
            t for t in build_document['os'][os][component][name]
            if t['build_id'] == build_id
        ]
        if to_del_job.__len__() == 0:
            return
        to_del_job = to_del_job[0]
        if olderBuild and not to_del_job['olderBuild']:
            to_del_job['olderBuild'] = True
            build_document['totalCount'] -= to_del_job['totalCount']
            build_document['failCount'] -= to_del_job['failCount']
        elif not olderBuild:
            to_del_job['deleted'] = True
        build_client.upsert(build, build_document)
    except Exception:
        pass
Example #7
 def get(self,info):
     device_id, temp, type, status = info.split(':')
     bucket = Bucket('couchbase://46.101.11.33:8091/devices')
     res = bucket.get(device_id, quiet=True)
     if res.success:
         bucket.n1ql_query('UPSERT INTO devices (KEY,VALUE) VALUES ("%s",{"device_id":"%s", "temp":"%s", "type":"%s", "status":"%s"})' % (device_id, device_id, temp, type, status)).execute()
         res = bucket.get(device_id, quiet=True)
         return res.value
     else:
         return {"errCode": "-1", "errMsg": "Could not find device %s" % device_id}
Example #8
    def create_bucket(self, name, ramQuotaMB=1024):
        """
        1. Create CBS bucket via REST
        2. Create client connection and poll until bucket is available
           Catch all connection exceptions and break when a NotFoundError is thrown
        3. Verify all server nodes are in a 'healthy' state before proceeding

        Followed the docs below that suggested this approach.
        http://docs.couchbase.com/admin/admin/REST/rest-bucket-create.html
        """

        log_info("Creating bucket {} with RAM {}".format(name, ramQuotaMB))

        data = {
            "name": name,
            "ramQuotaMB": str(ramQuotaMB),
            "authType": "sasl",
            "proxyPort": "11211",
            "bucketType": "couchbase",
            "flushEnabled": "1"
        }

        resp = self._session.post("{}/pools/default/buckets".format(self.url), data=data)
        log_r(resp)
        resp.raise_for_status()

        # Create client and retry until a NotFoundError is thrown
        start = time.time()
        while True:

            if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
                raise Exception("TIMEOUT while trying to create server buckets.")
            try:
                bucket = Bucket("couchbase://{}/{}".format(self.host, name))
                bucket.get('foo')
            except ProtocolError:
                log_info("Client Connection failed: Retrying ...")
                time.sleep(1)
                continue
            except TemporaryFailError:
                log_info("Failure from server: Retrying ...")
                time.sleep(1)
                continue
            except NotFoundError:
                log_info("Key not found error: Bucket is ready!")
                break

        self.wait_for_ready_state()

        return name
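
A minimal standalone sketch of the readiness-polling step described in the create_bucket docstring above, assuming the Couchbase Python SDK 2.x Bucket API. The helper name wait_until_bucket_ready and the probe key 'foo' are illustrative, not part of the original code.

import time

from couchbase.bucket import Bucket
from couchbase.exceptions import CouchbaseError, NotFoundError


def wait_until_bucket_ready(connstr, timeout_secs=60):
    """Poll a bucket until the KV service answers; NotFoundError means it is ready."""
    start = time.time()
    while time.time() - start < timeout_secs:
        try:
            # Any key will do: we only care whether the server can answer the request.
            Bucket(connstr).get('foo')
            return True
        except NotFoundError:
            return True      # key is absent but the bucket responded, so it is ready
        except CouchbaseError:
            time.sleep(1)    # connection refused / temporary failure: keep retrying
    raise RuntimeError("bucket did not become ready within {0}s".format(timeout_secs))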
Example #9
 def check_dataloss(self, server, bucket, num_items):
     from couchbase.bucket import Bucket
     from couchbase.exceptions import NotFoundError, CouchbaseError
     from lib.memcached.helper.data_helper import VBucketAwareMemcached
     self.log.info(
         "########## validating data for bucket : {} ###########".format(
             bucket))
     cb_version = RestConnection(server).get_nodes_version()[:3]
     if cb_version < "5":
         bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name),
                      timeout=5000)
     else:
         bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name),
                      username=server.rest_username,
                      password=server.rest_password,
                      timeout=5000)
     rest = RestConnection(self.master)
     VBucketAware = VBucketAwareMemcached(rest, bucket.name)
     _, _, _ = VBucketAware.request_map(rest, bucket.name)
     batch_start = 0
     batch_end = 0
     batch_size = 10000
     errors = []
     while num_items > batch_end:
         batch_end = batch_start + batch_size
         keys = []
         for i in xrange(batch_start, batch_end, 1):
             keys.append(str(i).rjust(20, '0'))
         try:
             bkt.get_multi(keys)
             self.log.info(
                 "Able to fetch keys starting from {0} to {1}".format(
                     keys[0], keys[len(keys) - 1]))
         except CouchbaseError as e:
             self.log.error(e)
             ok, fail = e.split_results()
             if fail:
                 for key in fail:
                     try:
                         bkt.get(key)
                     except NotFoundError:
                         vBucketId = VBucketAware._get_vBucket_id(key)
                         errors.append(
                             "Missing key: {0}, VBucketId: {1}".format(
                                 key, vBucketId))
         batch_start += batch_size
     self.log.info("Total missing keys:{}".format(len(errors)))
     self.log.info(errors)
     return errors
Example #10
def check_dataloss(ip, bucket, num_items):
    from couchbase.bucket import Bucket
    from couchbase.exceptions import NotFoundError, CouchbaseError, \
        CouchbaseNetworkError, CouchbaseTransientError  # all of these are caught below
    bkt = Bucket('couchbase://{0}/{1}'.format(ip, bucket),
                 username="******",
                 password="******")
    batch_start = 2000000
    batch_end = 0
    batch_size = 10000
    errors = []
    missing_keys = []
    errors_replica = []
    missing_keys_replica = []
    while num_items > batch_end:
        batch_end = batch_start + batch_size
        keys = []
        for i in xrange(batch_start, batch_end, 1):
            keys.append(str(i))
        try:
            bkt.get_multi(keys)
            print("Able to fetch keys starting from {0} to {1}".format(
                keys[0], keys[len(keys) - 1]))
        except CouchbaseError as e:
            print(e)
            ok, fail = e.split_results()
            if fail:
                for key in fail:
                    try:
                        bkt.get(key)
                    except NotFoundError:
                        errors.append("Missing key: {0}".format(key))
                        missing_keys.append(key)
        try:
            bkt.get_multi(keys, replica=True)
            print("Able to fetch keys starting from {0} to {1} in replica ".
                  format(keys[0], keys[len(keys) - 1]))
        except (CouchbaseError, CouchbaseNetworkError,
                CouchbaseTransientError) as e:
            print(e)
            ok, fail = e.split_results()
            if fail:
                for key in fail:
                    try:
                        bkt.get(key)
                    except NotFoundError:
                        errors_replica.append("Missing key: {0}".format(key))
                        missing_keys_replica.append(key)
        batch_start += batch_size
    return errors, missing_keys, errors_replica, missing_keys_replica
Example #11
 def get(self, info):
     device_id, temp, type, status = info.split(':')
     bucket = Bucket('couchbase://46.101.11.33:8091/devices')
     res = bucket.get(device_id, quiet=True)
     if res.success:
         bucket.n1ql_query(
             'UPSERT INTO devices (KEY,VALUE) VALUES ("%s",{"device_id":"%s", "temp":"%s", "type":"%s", "status":"%s"})'
             % (device_id, device_id, temp, type, status)).execute()
         res = bucket.get(device_id, quiet=True)
         return res.value
     else:
         return {
             "errCode": "-1",
             "errMsg": "Could not find device %s" % device_id
         }
Example #12
 def get(self,device_id):
     bucket = Bucket('couchbase://46.101.11.33:8091/devices')
     res = bucket.get(device_id, quiet=True)
     if res.success:
         return res.value
     else:
         return {"errCode": "-1", "errMsg": "Could not find device %s" % device_id}
Example #13
    def get_phrases(self, cb_url, output_file, input_file, docs_total):
        cb = Bucket("couchbase://{}/{}?operation_timeout=10".format(
            cb_url, "bucket-1"),
                    password="******")
        lines = self._shuffle_and_cut(input_file, 10**6)
        formatted_lines = list()
        for line in lines:
            formatted_lines.append(line.split()[0])
        lines = formatted_lines
        results = set()
        for docid in range(1, docs_total - 1):
            key = hex(random.randint(1, docs_total))[2:]
            try:
                txt = cb.get(key).value
                if txt["text"]:
                    txt = txt["text"].encode('ascii', 'ignore')
                    terms = txt.split(' ')
                    for idx, term in enumerate(terms):
                        if term in lines:
                            if len(terms) > idx + 1:
                                term_next = terms[idx + 1]
                                if str.isalpha(term_next):
                                    result_phrase = "{} {}".format(
                                        term, term_next)
                                    results.add(result_phrase)
            except Exception as e:
                print(("{}: {}: {}".format(key, len(results), str(e))))

            if len(results) > self.limit:
                break

        output_file = open(output_file, "w")
        for phrase in results:
            print(phrase, file=output_file)
Example #14
class NumericExctractor:
    def __init__(self, cb_url, bucket_name, items):
        self.distance = 39000
        self.output_lines_goal = 1000
        self.total_docs = items
        self.cb = Bucket("couchbase://{}/{}?operation_timeout=10".format(cb_url, bucket_name), password="******")
        self.output_list = list()

    def run(self):
        n = 0
        i = 0
        while i<self.output_lines_goal:
            n, min = self.read_number(n)
            max = min + random.randint(999999, 9999999999)
            self.output_list.append("{}   max".format(max))
            self.output_list.append("{}   min".format(min))
            self.output_list.append("{}:{}   max_min".format(max, min))
            i += 1
            print(i)
        self.write_and_exit()

    def read_number(self, n):
        step = random.randint(0, self.distance)
        n = n + step
        if n >= self.total_docs:
            self.write_and_exit()
        k = hex(n)[2:]
        v = self.cb.get(k).value
        return n, int(v['time'])

    def write_and_exit(self):
        f = open("numeric-dgm.txt", "w")
        f.write('\n'.join(self.output_list))
        f.close()
Example #15
    def get_phrases(self, cb_url, output_file, input_file, docs_total):
        cb = Bucket("couchbase://{}/{}?operation_timeout=10".format(cb_url, "bucket-1"), password="******")
        lines = self._shuffle_and_cut(input_file, 10 ** 6)
        formatted_lines = list()
        for line in lines:
            formatted_lines.append(line.split()[0])
        lines = formatted_lines
        results = set()
        for docid in range(1, docs_total - 1):
            key = hex(random.randint(1, docs_total))[2:]
            try:
                txt = cb.get(key).value
                if txt["text"]:
                    txt = txt["text"].encode('ascii', 'ignore')
                    terms = txt.split(' ')
                    for idx, term in enumerate(terms):
                        if term in lines:
                            if len(terms) > idx + 1:
                                term_next = terms[idx + 1]
                                if str.isalpha(term_next):
                                    result_phrase = "{} {}".format(term, term_next)
                                    results.add(result_phrase)
            except Exception as e:
                print(("{}: {}: {}".format(key, len(results), str(e))))

            if len(results) > self.limit:
                break

        output_file = open(output_file, "w")
        for phrase in results:
            print(phrase, file=output_file)
Example #16
 def get(self, id):
     bucket = Bucket(self._bucketUrl)
     try:
         result = bucket.get(id)
         return result.value
     except NotFoundError:
         return None
Example #17
    def test_add_concurrent(self):
        DOCID = 'subdoc_doc_id'
        CONNSTR = 'couchbase://' + self.servers[0].ip + ':11210'
        ITERATIONS = 200
        THREADS = 20

        main_bucket = SDK_Bucket(CONNSTR)
        main_bucket.upsert(DOCID, {'recs':[]})

        thrs = []

        class Runner(Thread):
            def run(self, *args, **kw):
                cb = SDK_Bucket(CONNSTR)
                for x in range(ITERATIONS):
                    cb.mutate_in(DOCID, SD.array_append('recs', 1))

        thrs = [Runner() for x in range(THREADS)]
        [t.start() for t in thrs]
        [t.join() for t in thrs]

        obj = main_bucket.get(DOCID)

        array_entry_count = len(obj.value['recs'])

        self.assertTrue(array_entry_count == ITERATIONS * THREADS,
                        'Incorrect number of array entries. Expected {0} actual {1}'.format(
                            ITERATIONS * THREADS, array_entry_count))
Example #18
File: views.py Project: dmitrof/nihongo
    def post(self, request, group_id, deck_id, card_id, is_new = False):
        post_par = []
        c = Bucket('couchbase://localhost/nihongo')
        for param in request.POST:
            pass
            #print(param + " " + request.POST[param])

        #words = request.POST.getlist("word[]")
        cardBuilder = CardBuilder()
        pages = request.POST.getlist("pages[]")
        card_to_save = cardBuilder.processPOST(pages, request.POST)

        is_new = request.POST.get('is_new', False)
        if is_new:
            deck = c.get(deck_id).value
            print(deck.get('cards_list'))
            deck['cards_list'].append(card_id)
            c.upsert(deck_id, deck)


        #print(cardBuilder.processPOST(pages, request.POST))
        card_to_save['doc_channels'] = [group_id]
        c.upsert(card_id, card_to_save)

        #for word in words:
         #   print(word)

        return HttpResponseRedirect(reverse('tutor:edit_card', kwargs={'group_id' : group_id, 'deck_id' : deck_id,'card_id' : card_id}))
Example #19
def get_user(bucket: Bucket, username: str):
    doc_id = f"userprofile::{username}"
    result = bucket.get(doc_id, quiet=True)
    if not result.value:
        return None
    user = UserInDB(**result.value)
    return user
Example #20
    def test_add_concurrent(self):
        DOCID = 'subdoc_doc_id'
        CONNSTR = 'couchbase://' + self.servers[0].ip + ':11210'
        ITERATIONS = 200
        THREADS = 20

        main_bucket = SDK_Bucket(CONNSTR)
        main_bucket.upsert(DOCID, {'recs':[]})

        class Runner(Thread):
            def run(self, *args, **kw):
                cb = SDK_Bucket(CONNSTR)
                for x in range(ITERATIONS):
                    cb.mutate_in(DOCID, SD.array_append('recs', 1))

        thrs = [Runner() for x in range(THREADS)]
        [t.start() for t in thrs]
        [t.join() for t in thrs]

        obj = main_bucket.get(DOCID)

        array_entry_count = len(obj.value['recs'])

        self.assertTrue(array_entry_count == ITERATIONS * THREADS,
                        'Incorrect number of array entries. Expected {0} actual {1}'.format(
                            ITERATIONS * THREADS, array_entry_count))
Example #21
def main():
    from argparse import ArgumentParser
    import time

    ap = ArgumentParser()
    ap.add_argument('-C', '--couchbase',
                    help='Couchbase connection string',
                    default='couchbase://localhost')
    ap.add_argument('-M', '--memcached',
                    help='List of memcached hosts to use in host:port format',
                    action='append',
                    default=[])
    options = ap.parse_args()

    # Get memcached hosts
    mc_hosts = []
    for server in options.memcached:
        host, port = server.split(':')
        port = int(port)
        mc_hosts.append((host, port))

    mirror = CouchbaseMemcacheMirror(options.couchbase, mc_hosts)
    value = {
        'entry': 'Mirror value',
        'updated': time.time()
    }
    mirror.set('mirrkey', value)

    # Create individual clients, to demonstrate
    cb = CbBucket(options.couchbase)
    print 'Value from couchbase: {}'.format(cb.get('mirrkey').value)
    print 'Value from Memcached: {}'.format(McClient(mc_hosts).get('mirrkey'))
Example #22
File: views.py Project: dmitrof/nihongo
    def get(self, request, group_id):
        print("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAa")

        c = Bucket('couchbase://localhost/nihongo')
        group = c.get(group_id).value
        group_decks = group.get('decks_list')
        decks_list = []
        for d in group_decks:
            try:
                deck = c.get(d)
                decks_list.append(deck)
            except CouchbaseError:
                pass
        return render(request, self.template_name, {
             'decks_list' : decks_list,  'group' : group.get('group_name'), 'group_id' : group_id
        })
Example #23
class Cache(object):

    """ 缓存系统 """

    def __init__(self, host='localhost', port=12000, debug=0):
        try:
            self._db = Bucket('couchbase://192.168.1.112/Cache')
        except:
            self._db = None

    def get(self, key):
        if self._db:
            try:
                v = self._db.get(_toUTF8(key))
                value = v.value.encode('gbk')
                return value
            except CouchbaseError:
                return None
        return None

    def set(self, key, value, expire_seconds=0):
        if self._db:
            key = _toUTF8(key)
            value = _toUTF8(value)
            if expire_seconds:
                return self._db.upsert(key, value, ttl=expire_seconds)  # Bucket has no setex(); use upsert() with a TTL
            else:
                return self._db.set(key, value)
        return None
Example #24
    def test_create_ephemeral_bucket_and_use(self):
        bucket_name = 'ephemeral'
        password = '******'

        # create ephemeral test bucket
        self.admin.bucket_create(name=bucket_name,
                                 bucket_type='ephemeral',
                                 ram_quota=100,
                                 bucket_password=password)
        self.admin.wait_ready(bucket_name, timeout=10)

        # connect to bucket to ensure we can use it
        conn_str = "http://{0}:{1}/{2}".format(self.cluster_info.host,
                                               self.cluster_info.port,
                                               bucket_name)
        bucket = Bucket(connection_string=conn_str, password=password)
        self.assertIsNotNone(bucket)

        # create a doc then read it back
        key = 'mike'
        doc = {'name': 'mike'}
        bucket.upsert(key, doc)
        result = bucket.get(key)

        # original and result should be the same
        self.assertEqual(doc, result.value)
Example #25
def get_doc(
    bucket: Bucket, *, doc_id: str, doc_model: Type[PydanticModel]
) -> Optional[PydanticModel]:
    result = bucket.get(doc_id, quiet=True)
    if not result.value:
        return None
    model = doc_model(**result.value)
    return model
Example #26
 def _get_documets(self, bucket_name, field):
     bucket = Bucket('couchbase://{ip}/{name}'.format(ip=self.master.ip, name=bucket_name))
     if not bucket:
         log.info("Bucket connection is not established.")
     log.info("Updating {0} in all documents in bucket {1}...".format(field, bucket_name))
     query = "SELECT * FROM {0}".format(bucket_name)
     for row in bucket.n1ql_query(query):
         yield row[bucket.bucket]['_id'], bucket.get(key=row[bucket.bucket]['_id']).value
Example #27
 def _get_documets(self, bucket_name, field):
     bucket = Bucket('couchbase://{ip}/{name}'.format(ip=self.master.ip, name=bucket_name))
     if not bucket:
         log.info("Bucket connection is not established.")
     log.info("Updating {0} in all documents in bucket {1}...".format(field, bucket_name))
     query = "SELECT * FROM {0}".format(bucket_name)
     for row in bucket.n1ql_query(query):
         yield row[bucket.bucket]['_id'], json.loads(bucket.get(key=row[bucket.bucket]['_id']).value)
Example #28
def get_user(bucket: Bucket, name: str):
    doc_id = get_user_doc_id(name)
    result = bucket.get(doc_id, quiet=True)
    if not result.value:
        return None
    print(result.value)
    user = UserStored(**result.value)
    user.Meta.key = result.key
    return user
Example #29
 def get(self, device_id):
     bucket = Bucket('couchbase://46.101.11.33:8091/devices')
     res = bucket.get(device_id, quiet=True)
     if res.success:
         return res.value
     else:
         return {
             "errCode": "-1",
             "errMsg": "Could not find device %s" % device_id
         }
Example #30
 def _get_documets(self, bucket_name, field):
     bucket = Bucket('couchbase://{ip}/{name}'.format(ip=self.master.ip, name=bucket_name))
     if not bucket:
         log.info("Bucket connection is not established.")
     log.info("Updating {0} in all documents in bucket {1}...".format(field, bucket_name))
     for i in range(self.docs_per_day):
         for j in range(self.docs_per_day):
             key = "array_dataset-" + str(i) + "-" + str(j)
             document = bucket.get(key=key).value
             yield key, document
Example #31
File: views.py Project: dmitrof/nihongo
    def get(self, request, group_id, deck_id):
        response = "hello, {}".format(deck_id)
        b = Bucket('couchbase://localhost/nihongo')
        deck = b.get(deck_id)
        cards_list = deck.value.get('cards_list')
        cards_set = []
        for c in cards_list:
            try:
                card = b.get(c)
                cards_set.append(card)
            except CouchbaseError:
                pass

        rulesProvider = RulesProvider()

        task_types = rulesProvider.provideTaskTypeList()
        #return HttpResponse(response)
        return render(request, 'tutor/deck_detail.html', { 'group_id' : group_id,
            'deck_id' : deck_id, 'deck' : deck.value, 'cards_set' : cards_set, 'task_types' : task_types,
        })
Example #32
def usernameIsValue(username):
    print username
    try:
        bucket = Bucket("couchbase://localhost/default")
        rv = bucket.get(username)
        if rv is not None:
            return True

    except Exception as e:
        print "not found"
        send_simple_message(username)
Example #33
 def check_dataloss(self, server, bucket, num_items):
     from couchbase.bucket import Bucket
     from couchbase.exceptions import NotFoundError,CouchbaseError
     from lib.memcached.helper.data_helper import VBucketAwareMemcached
     self.log.info("########## validating data for bucket : {} ###########".format(bucket))
     cb_version = RestConnection(server).get_nodes_version()[:3]
     if cb_version < "5":
         bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name),timeout=5000)
     else:
         bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name),username=server.rest_username,
                      password=server.rest_password,timeout=5000)
     rest = RestConnection(self.master)
     VBucketAware = VBucketAwareMemcached(rest, bucket.name)
     _, _, _ = VBucketAware.request_map(rest, bucket.name)
     batch_start = 0
     batch_end = 0
     batch_size = 10000
     errors = []
     while num_items > batch_end:
         batch_end = batch_start + batch_size
         keys = []
         for i in xrange(batch_start, batch_end, 1):
             keys.append(str(i).rjust(20, '0'))
         try:
             bkt.get_multi(keys)
             self.log.info("Able to fetch keys starting from {0} to {1}".format(keys[0], keys[len(keys) - 1]))
         except CouchbaseError as e:
             self.log.error(e)
             ok, fail = e.split_results()
             if fail:
                 for key in fail:
                     try:
                         bkt.get(key)
                     except NotFoundError:
                         vBucketId = VBucketAware._get_vBucket_id(key)
                         errors.append("Missing key: {0}, VBucketId: {1}".
                                       format(key, vBucketId))
         batch_start += batch_size
     self.log.info("Total missing keys:{}".format(len(errors)))
     self.log.info(errors)
     return errors
Example #34
class TaskService(object):
    def __init__(self):
        self._bucket = Bucket("couchbase://localhost/Tasks", password="******")

    def get_tasks(self):
        rows = self._bucket.query("tasks", "all", stale=False)
        tasks = []
        for row in rows:
            result = self._bucket.get(row.key)
            task = result.value
            task["id"] = row.key
            tasks.append(task)
        return tasks

    def get_task(self, key):
        task = self._bucket.get(key)
        return task.value

    def create_task(self, task):
        key = uuid.uuid1()
        task["type"] = "task"
        task["isComplete"] = False
        self._bucket.insert(str(key), task)

    def update_task(self, key, task):
        saved_task = self.get_task(key)
        saved_task["title"] = task["title"]
        saved_task["description"] = task["description"]
        saved_task["dueDate"] = task["dueDate"]
        saved_task["isComplete"] = task["isComplete"]
        saved_task["parentId"] = task["parentId"]

        self._bucket.upsert(key, saved_task)

    def delete_task(self, key):
        self._bucket.remove(key)

    def complete_task(self, key):
        saved_task = self.get_task(key)
        saved_task["isComplete"] = True
        self._bucket.upsert(key, saved_task)
Example #35
class Populator():
    def __init__(self):
        self.watcher = riotwatcher.RiotWatcher(
            default_region=riotwatcher.EUROPE_WEST,
            key='4f973eb3-7400-4eaf-9e6b-05b9ca56068c')
        self.bkt = Bucket()
        self.player_dict = self.get_players()
        self.bkt.upsert('Champions', self.watcher.static_get_champion_list())
        while True:
            for _, v in self.player_dict.iteritems():
                self.update_recent_games(v)
                time.sleep(SLEEP_TIME)

    def get_players(self):
        try:
            player_dict = self.bkt.get('Players').value
        except NotFoundError:
            player_dict = self.watcher.get_summoners(names=player_list)
            self.bkt.upsert('Players', player_dict)
        return player_dict

    def update_recent_games(self, player):
        api_matches = self.watcher.get_recent_games(player['id'])
        cb_key = 'Matches::{}'.format(player['id'])
        try:
            db_matches = self.bkt.get(cb_key).value
        except NotFoundError:
            self.bkt.upsert(cb_key, api_matches)
        else:
            game_ids = list()
            for db_match in db_matches['games']:
                game_ids.append(db_match['gameId'])

            for api_match in api_matches['games']:
                if api_match['gameId'] in game_ids:
                    break
                else:
                    db_matches['games'].insert(0, api_match)

            self.bkt.upsert(cb_key, db_matches)
Example #36
class CouchbaseConnection(BaseConnection):
    """ connects to a couchbase server """
    def __init__(self, bucket, host=None, password=None):
        connstr = 'couchbase://{h}/{b}'.format(h=(host or 'localhost'),
                                               b=bucket)
        self._cb = Bucket(connstr, password=password)

    def get(self, key):
        result = self._cb.get(key, quiet=True)
        if result.success: return result.value

    def set(self, key, value):
        if key is None:
            key = uuid4().hex
        encoded_val = dumps(value)
        result = self._cb.upsert(key, value, persist_to=1)
        if result.success:
            return result.key, result.cas
        raise PersistenceError()

    def delete(self, key):
        return self._cb.remove(key, quiet=True)

    def query(self, design, name, **kw):
        return self._cb.query(design, name, **kw)

    def design_view_create(self, design, views, syncwait=5):
        """
        design => name of design document
        views => dict { 'viewname': {'map':...} }
        """
        views_d = {'views': views}
        return self._cb.design_create(design,
                                      views_d,
                                      use_devmode=False,
                                      syncwait=syncwait)

    def view_create(self, design, name, mapf, redf=None, syncwait=5):
        mapf = dedent(mapf.lstrip('\n'))
        redf = dedent(redf.lstrip('\n')) if redf else ''
        doc = {'views': {name: {'map': mapf, 'reduce': redf}}}
        self._cb.design_create(design,
                               doc,
                               use_devmode=False,
                               syncwait=syncwait)

    def view_destroy(self, design):
        return self._cb.design_delete(design)
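
A hedged usage sketch for the view helpers in the CouchbaseConnection class above; the bucket name, design document name, view name, and map function are illustrative assumptions rather than values from the original project.

conn = CouchbaseConnection('default', host='localhost')
views = {
    'by_type': {
        'map': "function (doc, meta) { emit(doc.type, null); }"
    }
}
# design_view_create() wraps the dict as {'views': views} and publishes it via design_create()
conn.design_view_create('docs_by_type', views)
# query() forwards directly to Bucket.query(design, name, **kw)
for row in conn.query('docs_by_type', 'by_type', limit=10):
    print(row.key, row.docid)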
Example #37
class CouchbaseManager(Manager):
    def __init__(self, bucket):
        from couchbase.bucket import Bucket
        self.bucket = Bucket('couchbase://{}/{}'.format(
            os.environ.get('CB_HOST'), bucket))
        self.bucket.timeout = 30

    def set(self, key, value):
        self.bucket.upsert(key, value)

    def get(self, key):
        from couchbase.exceptions import NotFoundError
        try:
            return self.bucket.get(key).value
        except NotFoundError:
            raise FileNotFoundError()
Example #38
class CouchbaseManager(Manager):

    def __init__(self, bucket):
        from couchbase.bucket import Bucket
        self.bucket = Bucket('couchbase://{}/{}'.format(
            os.environ.get('CB_HOST'), bucket))
        self.bucket.timeout = 30

    def set(self, key, value):
        self.bucket.upsert(key, value)

    def get(self, key):
        from couchbase.exceptions import NotFoundError
        try:
            return self.bucket.get(key).value
        except NotFoundError:
            raise FileNotFoundError()
Example #39
def remove(
    bucket: Bucket, *, doc_id: str, doc_model: Type[PydanticModel] = None, persist_to=0
) -> Optional[Union[PydanticModel, bool]]:
    result = bucket.get(doc_id, quiet=True)
    if not result.value:
        return None
    if doc_model:
        model = doc_model(**result.value)
    with bucket.durability(
        persist_to=persist_to, timeout=config.COUCHBASE_DURABILITY_TIMEOUT_SECS
    ):
        result = bucket.remove(doc_id)
        if not result.success:
            return None
        if doc_model:
            return model
        return True
Example #40
class Datefacet:

    def __init__(self):
        from couchbase.n1ql import N1QLQuery
        from multiprocessing import Manager, Lock
        self.cb = Bucket('couchbase://172.23.123.38/bucket-1')
        self.row_iter = self.cb.n1ql_query(N1QLQuery('select meta().id from `bucket-1`'))
        self.lock = Lock()
        self.dsize = 1000000
        self.dateiter = Manager().dict({key: None for key in ['2013-10-17', '2013-11-17', '2014-02-09', '2015-11-26']})
        self.dateiter['2013-10-17'] = .65 * self.dsize
        self.dateiter['2013-11-17'] = .2 * self.dsize
        self.dateiter['2014-02-09'] = .1 * self.dsize
        self.dateiter['2015-11-26'] = .05 * self.dsize
        self.cycledates = itertools.cycle(self.dateiter.keys())

    def createdateset(self):
        for resultid in self.row_iter:
            '''
            Day 1 should have approximately 65% of the documents
            Day 2 should have approximately 20% of the documents
            Day 3 should have approximately 10% of the documents
            Day 4 should have approximately 5% of the documents
            format like this 2010-07-27
            '''
            val = self.cb.get(resultid["id"]).value
            self.lock.acquire()
            tmpdate = self.cycledates.next()
            val["date"] = tmpdate
            self.cb.set(resultid["id"], val)
            '''
             Critical section
            '''
            self.dateiter[tmpdate] -= 1
            if self.dateiter[tmpdate] == 0:
                self.dateiter.pop(tmpdate, None)
                self.cycledates = itertools.cycle(self.dateiter.keys())

            self.lock.release()
            print(self.dateiter)

    def run(self):
        import concurrent.futures
        with concurrent.futures.ProcessPoolExecutor(max_workers=10) as executor:
            executor.submit(self.createdateset())
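
A small self-contained sketch of the weighted-date scheme described in the Datefacet docstring above (roughly 65/20/10/5 percent of documents per day): cycle over the dates, decrement a per-date budget, and rebuild the cycle whenever a date is exhausted. The 20-item total is illustrative only.

import itertools

budget = {'2013-10-17': 13, '2013-11-17': 4, '2014-02-09': 2, '2015-11-26': 1}
cycledates = itertools.cycle(list(budget))
assigned = []
while budget:
    tmpdate = next(cycledates)
    assigned.append(tmpdate)
    budget[tmpdate] -= 1
    if budget[tmpdate] == 0:
        del budget[tmpdate]
        # Rebuild the cycle so an exhausted date is never handed out again.
        cycledates = itertools.cycle(list(budget))
print(len(assigned))  # 20 dates, split roughly 65/20/10/5 across the four days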
Example #41
class Datefacet:
    def __init__(self):
        from multiprocessing import Manager, Lock
        self.cb = Bucket('couchbase://172.23.99.211/bucket-1',
                         password="******")
        self.lock = Lock()
        self.dsize = 1000000
        self.dateiter = Manager().dict({
            key: None
            for key in
            ['2013-10-17', '2013-11-17', '2014-02-09', '2015-11-26']
        })
        self.dateiter['2013-10-17'] = .65 * self.dsize
        self.dateiter['2013-11-17'] = .2 * self.dsize
        self.dateiter['2014-02-09'] = .1 * self.dsize
        self.dateiter['2015-11-26'] = .05 * self.dsize
        self.cycledates = itertools.cycle(self.dateiter.keys())

    def createdateset(self):
        for resultid in range(0, self.dsize):
            key = hex(resultid)[2:]
            '''
            Day 1 should have approximately 65% of the documents
            Day 2 should have approximately 20% of the documents
            Day 3 should have approximately 10% of the documents
            Day 4 should have approximately 5% of the documents
            format like this 2010-07-27
            '''
            val = self.cb.get(key).value
            self.lock.acquire()
            tmpdate = next(self.cycledates)
            val["date"] = tmpdate
            self.cb.set(key, val)
            self.dateiter[tmpdate] -= 1
            if self.dateiter[tmpdate] == 0:
                self.dateiter.pop(tmpdate, None)
                self.cycledates = itertools.cycle(self.dateiter.keys())
            self.lock.release()

    def run(self):
        with concurrent.futures.ProcessPoolExecutor(
                max_workers=10) as executor:
            executor.submit(self.createdateset())
Example #42
class GenericDocgen:
    def __init__(self, master_file_path, cb_url, bucket_name, chunk_size=1024):
        self.chunk_size = chunk_size
        self.items = 100
        self.master_file_path = master_file_path
        self.dateBuilder = DateBuilder()
        self.numBuilder = NumericBuilder(self.items)

        self.c = Bucket("couchbase://{}/{}?operation_timeout=10".format(
            cb_url, bucket_name),
                        password="******")

    def read_file_gen(self, file):
        while True:
            data = file.read(self.chunk_size)
            if not data:
                break
            yield data

    def build(self, master_file):
        self.master_lines = self.read_file_gen(master_file)
        doc = {}
        doc["text"] = self.master_lines.next()
        doc["text2"] = self.master_lines.next()
        doc["time"] = self.numBuilder.build()
        doc["date"] = self.dateBuilder.build()
        return doc

    def run(self):
        master_file = open(self.master_file_path)
        i = 0
        while i < self.items:
            key = hex(i)[2:]
            val = self.build(master_file)
            self.c.upsert(key, val)
            try:
                saved = self.c.get(key).value
                if 'text' in saved:
                    i += 1
            except Exception as e:
                pass
        master_file.close()
Example #43
class GenericDocgen:
    def __init__(self, master_file_path, cb_url, bucket_name, chunk_size=1024):
        self.chunk_size = chunk_size
        self.items = 100
        self.master_file_path = master_file_path
        self.dateBuilder = DateBuilder()
        self.numBuilder = NumericBuilder(self.items)

        self.c = Bucket("couchbase://{}/{}?operation_timeout=10".format(cb_url, bucket_name), password="******")

    def read_file_gen(self, file):
        while True:
            data = file.read(self.chunk_size)
            if not data:
                break
            yield data

    def build(self, master_file):
        self.master_lines = self.read_file_gen(master_file)
        doc = {}
        doc["text"] = self.master_lines.next()
        doc["text2"] = self.master_lines.next()
        doc["time"] = self.numBuilder.build()
        doc["date"] = self.dateBuilder.build()
        return doc

    def run(self):
        master_file = open(self.master_file_path)
        i = 0
        while i < self.items:
            key = hex(i)[2:]
            val = self.build(master_file)
            self.c.upsert(key, val)
            try:
                saved = self.c.get(key).value
                if 'text' in saved:
                    i += 1
            except Exception as e:
                pass
        master_file.close()
Example #44
class Datefacet:
    def __init__(self):
        from multiprocessing import Manager, Lock
        self.cb = Bucket('couchbase://172.23.99.211/bucket-1', password="******")
        self.lock = Lock()
        self.dsize = 1000000
        self.dateiter = Manager().dict({key: None for key in ['2013-10-17', '2013-11-17', '2014-02-09', '2015-11-26']})
        self.dateiter['2013-10-17'] = .65 * self.dsize
        self.dateiter['2013-11-17'] = .2 * self.dsize
        self.dateiter['2014-02-09'] = .1 * self.dsize
        self.dateiter['2015-11-26'] = .05 * self.dsize
        self.cycledates = itertools.cycle(self.dateiter.keys())

    def createdateset(self):
        for resultid in range(0, self.dsize):
            key = hex(resultid)[2:]
            '''
            Day 1 should have approximately 65% of the documents
            Day 2 should have approximately 20% of the documents
            Day 3 should have approximately 10% of the documents
            Day 4 should have approximately 5% of the documents
            format like this 2010-07-27
            '''
            val = self.cb.get(key).value
            self.lock.acquire()
            tmpdate = next(self.cycledates)
            val["date"] = tmpdate
            self.cb.set(key, val)
            self.dateiter[tmpdate] -= 1
            if self.dateiter[tmpdate] == 0:
                self.dateiter.pop(tmpdate, None)
                self.cycledates = itertools.cycle(self.dateiter.keys())
            self.lock.release()

    def run(self):
        with concurrent.futures.ProcessPoolExecutor(max_workers=10) as executor:
            executor.submit(self.createdateset())
Example #45
File: views.py Project: dmitrof/nihongo
    def post(self, request, group_id, deck_id, *args, **kwargs):
        c = Bucket('couchbase://localhost/nihongo')
        success = 'dunno'
        if 'delete_card' in request.POST:
            try:
                card_id = request.POST['card_id']
                print(card_id)
                deck = c.get(deck_id).value
                print(deck.get('cards_list'))
                deck.get('cards_list').remove(card_id)
                c.upsert(deck_id, deck)
                success = 'success'
            except (BaseException, CouchbaseError) as e:
                success = 'error'
                print(e)
            return HttpResponseRedirect(reverse('tutor:deck_detail', kwargs={'group_id' : group_id,'deck_id' : deck_id}))
        if 'edit_card' in request.POST:
            try:
                card_id = request.POST['card_id']
                return HttpResponseRedirect(reverse('tutor:edit_card', kwargs={'group_id' : group_id, 'deck_id' : deck_id, 'card_id' : card_id}))
            except BaseException as e:
                print(e)
                raise Http404()
        if 'create_card' in request.POST:
            print("CREATING A NEW CARD IN DECK")
            try:
                task_type = request.POST['task_type']
                card_id = 'card_'  + str(uuid4()).replace('-', '_')
                print("SUCESSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSSss")
                return HttpResponseRedirect(reverse('tutor:edit_card', kwargs={'group_id' : group_id, 'deck_id' : deck_id, 'card_id' : card_id, 'is_new' : True, 'task_type' : task_type}))
            except (BaseException, CouchbaseError) as e:
                print(e)
                raise Http404(e)

        else:
            print("nothing happened")
            return HttpResponseRedirect(reverse('tutor:deck_detail', kwargs={'group_id' : group_id,'deck_id' : deck_id}))
Example #46
class Db(object):
    def __init__(self, couchbase_sup=False, mongo_sup=False):
        self.cfg = election[os.getenv("TARGET_PLATFORM")]
        self.vt = None
        self.mysql = None
        self.connect_mysql()
        if couchbase_sup:
            cb_config = self.cfg.COUCHBASE_PARAM
            self.cb = Bucket("couchbase://{0}/{1}".format(cb_config[0], cb_config[1]), username=cb_config[2], password=cb_config[3])
        if mongo_sup:
            mongo_cfg = self.cfg.MONGO_PARAM
            self.mongodb_client = MongoClient(host=mongo_cfg[0], port=int(mongo_cfg[1]), username=mongo_cfg[2], password=mongo_cfg[3], authSource=mongo_cfg[4], authMechanism=mongo_cfg[5])

    def connect_mysql(self):
        mysql_config = self.cfg.MYSQL_PARAM
        self.mysql = mdb.connect(host=mysql_config[0], user=mysql_config[1], passwd=mysql_config[2], db=mysql_config[3], port=int(mysql_config[4]))
        self.mysql.autocommit(False)
        self.vt = self.mysql.cursor()

    def write_mysql(self, query):
        try:
            self.vt.execute(query)
            return True
        except mdb.OperationalError:
            self.connect_mysql()
            self.vt.execute(query)
            return True

    def count_mysql(self, query):
        try:
            self.vt.execute(query)
            return self.vt.rowcount
        except mdb.OperationalError:
            self.connect_mysql()
            self.vt.execute(query)
            return self.vt.rowcount

    def readt_mysql(self, query):
        try:
            self.vt.execute(query)
            self.mysql_commit()
            return self.vt.fetchall()
        except mdb.OperationalError:
            self.connect_mysql()
            self.vt.execute(query)
            self.mysql_commit()
            return self.vt.fetchall()

    def mysql_commit(self):
        self.mysql.commit()

    def mysql_rollback(self):
        self.mysql.rollback()

    def write_couchbase(self, arg):
        key = calculate_hash(arg.keys()[0])
        values = []
        for i in arg.values():
            if isinstance(i, str):
                values.append(calculate_hash(i))
                continue
            if isinstance(i, list):
                for e in i:
                    values.append(calculate_hash(e))
                continue
            values.append(i)
        try:
            self.cb.insert(key, values)
        except couchbase.exceptions.KeyExistsError:
            self.cb.replace(key, values)
        return True

    def readt_couchbase(self, key):
        try:
            return True, self.cb.get(calculate_hash(key)).value
        except couchbase.exceptions.NotFoundError:
            return False, 0

    def delete_key_couchbase(self, key):
        try:
            self.cb.delete(calculate_hash(key), quiet=True)
        except couchbase.exceptions.NotFoundError:
            pass
        finally:
            return True
Example #47
from __future__ import print_function
from time import sleep

from couchbase.bucket import Bucket
from couchbase.exceptions import NotFoundError

cb = Bucket('couchbase://10.0.0.31/default')

print('Storing with an expiration of 2 seconds')
cb.upsert('docid', {'some': 'value'}, ttl=2)

print('Getting item back immediately')
print(cb.get('docid').value)

print('Sleeping for 4 seconds...')
sleep(4)
print('Getting key again...')
try:
    cb.get('docid')
except NotFoundError:
    print('Get failed because item has expired')

print('Storing item again (without expiry)')
cb.upsert('docid', {'some': 'value'})

print('Using get-and-touch to retrieve key and modify expiry')
rv = cb.get('docid', ttl=2)
print('Value is:', rv.value)

print('Sleeping for 4 seconds again')
sleep(4)
Example #48
def storeTest(jobDoc, view, first_pass = True, lastTotalCount = -1, claimedBuilds = None):

    bucket = view["bucket"]

    claimedBuilds = claimedBuilds or {}
    client = Bucket(HOST+'/'+bucket)

    doc = jobDoc
    url = doc["url"]

    if url.find("sdkbuilds.couchbase") > -1:
        url = url.replace("sdkbuilds.couchbase", "sdkbuilds.sc.couchbase")

    res = getJS(url, {"depth" : 0})

    if res is None:
        return

    # do not process disabled jobs
    if isDisabled(doc):
        purgeDisabled(res, bucket)
        return

    # operate as 2nd pass if test_executor
    if isExecutor(doc["name"]):
        first_pass = False

    buildHist = {}
    if res.get("lastBuild") is not None:

        bids = [b["number"] for b in res["builds"]]

        if first_pass:
            bids.reverse()  # bottom to top 1st pass

        for bid in bids:

            oldName = JOBS.get(doc["name"]) is not None
            if oldName and bid in JOBS[doc["name"]]:
                continue # job already stored
            else:
                if oldName and first_pass == False:
                    JOBS[doc["name"]].append(bid)

            doc["build_id"] = bid
            res = getJS(url+str(bid), {"depth" : 0})
            if res is None:
                return

            if "result" not in res:
                continue

            doc["result"] = res["result"]
            doc["duration"] = res["duration"]

            if res["result"] not in ["SUCCESS", "UNSTABLE", "FAILURE", "ABORTED"]:
                continue # unknown result state

            actions = res["actions"]
            totalCount = getAction(actions, "totalCount") or 0
            failCount  = getAction(actions, "failCount") or 0
            skipCount  = getAction(actions, "skipCount") or 0
            doc["claim"] = getClaimReason(actions)
            if totalCount == 0:
                if lastTotalCount == -1:
                    continue # no tests ever passed for this build
                else:
                    totalCount = lastTotalCount
                    failCount = totalCount
            else:
                lastTotalCount = totalCount

            doc["failCount"] = failCount
            doc["totalCount"] = totalCount - skipCount
            params = getAction(actions, "parameters")
            if params is None:
               # possibly new api
               if not 'keys' in dir(actions) and len(actions) > 0:
                   # actions is not a dict and has data
                   # then use the first object that is a list
                   for a in actions:
                      if not 'keys' in dir(a):
                          params = a

            componentParam = getAction(params, "name", "component")
            if componentParam is None:
                testYml = getAction(params, "name", "test")
                if testYml and testYml.find(".yml"):
                    testFile = testYml.split(" ")[1]
                    componentParam = "systest-"+str(os.path.split(testFile)[-1]).replace(".yml","")

            if componentParam:
                subComponentParam = getAction(params, "name", "subcomponent")
                if subComponentParam is None:
                    subComponentParam = "server"
                osParam = getAction(params, "name", "OS") or getAction(params, "name", "os")
                if osParam is None:
                    osParam = doc["os"]
                if not componentParam or not subComponentParam or not osParam:
                    continue

                pseudoName = str(osParam+"-"+componentParam+"_"+subComponentParam)
                doc["name"] = pseudoName
                _os, _comp = getOsComponent(pseudoName, view)
                if _os and  _comp:
                   doc["os"] = _os
                   doc["component"] = _comp
                if not doc.get("os") or not doc.get("component"):
                   continue


            if bucket == "server":
                doc["build"], doc["priority"] = getBuildAndPriority(params)
            else:
                doc["build"], doc["priority"] = getBuildAndPriority(params, True)

            if not doc.get("build"):
                continue

            # run special caveats on collector
            doc["component"] = caveat_swap_xdcr(doc)
            if caveat_should_skip(doc):
                continue

            histKey = doc["name"]+"-"+doc["build"]
            if not first_pass and histKey in buildHist:

                #print "REJECTED- doc already in build results: %s" % doc
                #print buildHist

                # attempt to delete if this record has been stored in couchbase

                try:
                    oldKey = "%s-%s" % (doc["name"], doc["build_id"])
                    oldKey = hashlib.md5(oldKey).hexdigest()
                    client.remove(oldKey)
                    #print "DELETED- %d:%s" % (bid, histKey)
                except:
                    pass

                continue # already have this build results


            key = "%s-%s" % (doc["name"], doc["build_id"])
            key = hashlib.md5(key).hexdigest()

            try: # get custom claim if exists
                oldDoc = client.get(key)
                customClaim =  oldDoc.value.get('customClaim')
                if customClaim:
                    claimedBuilds[doc["build"]] = customClaim
            except:
                pass #ok, this is new doc 

            if doc["build"] in claimedBuilds: # apply custom claim
                doc['customClaim'] = claimedBuilds[doc["build"]] 

            try:
                client.upsert(key, doc)
                buildHist[histKey] = doc["build_id"]
            except:
                print "set failed, couchbase down?: %s"  % (HOST)


    if first_pass:
        storeTest(jobDoc, view, first_pass = False, lastTotalCount = lastTotalCount, claimedBuilds = claimedBuilds)
Example #49
0
File: db.py Project: hkodungallur/bbdb
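# Thin wrapper around a Couchbase Bucket used by the build database. This snippet
# presumably relies on imports defined elsewhere in db.py (not shown here), roughly:
#   from couchbase.bucket import Bucket, LOCKMODE_WAIT
#   from couchbase.n1ql import N1QLQuery
#   from couchbase.exceptions import CouchbaseError
# along with a module-level `logger`.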
class DB(object):
    def __init__(self, bucket):
        self.bucket = bucket
        self.db = Bucket(bucket, lockmode=LOCKMODE_WAIT)

    def doc_exists(self, docId):
        try:
            result = self.db.get(docId)
        except CouchbaseError as e:
            return False

        return result

    def insert_build_history(self, build, update=False):
        try:
            docId = build['version']+"-"+str(build['build_num'])
            if update:
                result = self.db.upsert(docId, build)
            else:
                result = self.db.insert(docId, build)
            logger.debug("{0}".format(result))
        except CouchbaseError as e:
            if e.rc == 12: 
                logger.warning("Couldn't create build history {0} due to error: {1}".format(docId, e))
                docId = None

        return docId

    def insert_distro_history(self, distro, update=False):
        try:
            docId = distro['version']+"-"+str(distro['build_num'])+"-"+distro['distro']+"-"+distro['edition']
            if update:
                result = self.db.upsert(docId, distro)
            else:
                result = self.db.insert(docId, distro)
            logger.debug("{0}".format(result))
        except CouchbaseError as e:
            if e.rc == 12:
                logger.warning("Couldn't create distro history {0} due to error: {1}".format(docId, e))
                docId = None

        return docId

    def insert_test_history(self, unit, test_type='unit', update=False):
        try:
            if test_type == 'unit':
                docId = unit['version']+"-"+str(unit['build_num'])+"-"+unit['distro']+"-"+unit['edition']+'-tests'
            elif test_type == 'build_sanity':
                docId = unit['version']+"-"+str(unit['build_num'])+"-"+unit['distro']+"-"+unit['edition']+'-sanity-tests'

            if update:
                result = self.db.upsert(docId, unit)
            else:
                result = self.db.insert(docId, unit)
            logger.debug("{0}".format(result))
        except CouchbaseError as e:
            if e.rc == 12:
                logger.warning("Couldn't create test history {0} due to error: {1}".format(docId, e))
                docId = None

        return docId

    def insert_commit(self, commit):
        docId = commit['repo']+"-"+str(commit['sha'])
        inb = commit['in_build'][0]
        try:
            result = self.db.get(docId)
            val = result.value
            if not inb in val['in_build']:
                val['in_build'].append(inb)
                result = self.db.upsert(docId, val)
        except CouchbaseError as e:
            if e.rc == 13:  # LCB_KEY_ENOENT: no commit doc yet, so create it
                try:
                    result = self.db.insert(docId, commit)
                    logger.debug("{0}".format(result))
                except CouchbaseError as e:
                    print(e.rc)
                    if e.rc == 12:  # LCB_KEY_EEXISTS: the document already exists
                        logger.error("Couldn't create commit history {0} due to error: {1}".format(docId, e))
                        docId = None

        return docId

    def update_distro_result(self, docId, distroId, result):
        try:
            ret = self.db.get(docId).value
            if not distroId in ret[result]:
                ret[result].append(distroId)
            if result != 'incomplete':
                if distroId in ret['incomplete']:
                    ret['incomplete'].remove(distroId)
            self.db.upsert(docId, ret)
            logger.debug("{0}".format(result))
        except CouchbaseError as e:
            logger.warning("Couldn't update distro result on {0} due to error: {1}".format(docId, e))
            docId = None

        return

    def get_incomplete_builds(self):
        q = N1QLQuery("select url from `build-history` where result is NULL")
        urls = []
        for row in self.db.n1ql_query(q):
            urls.append(row['url'])
        return urls

    def get_incomplete_sanity_runs(self):
        q = N1QLQuery("select sanity_url from `build-history` where type = 'top_level_build' and sanity_result = 'INCOMPLETE'")
        urls = []
        for row in self.db.n1ql_query(q):
            urls.append(row['sanity_url'])
        return urls

    def get_incomplete_unit_runs(self):
        q = N1QLQuery("select unit_urls from `build-history` where type = 'top_level_build' and unit_result = 'INCOMPLETE'")
        urls = []
        for row in self.db.n1ql_query(q):
            ulist = row['unit_urls']
            for u in ulist:
                if u['result'] == 'INCOMPLETE':
                    urls.append(u['url'])
        return urls
Example #50
0
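# Instrumentation tests exercising the Couchbase Python SDK (2.x API) under a tracer.
# The snippet presumably relies on imports and helpers defined elsewhere in the test
# module (not shown here), for example:
#   import time
#   import unittest
#   import couchbase.subdocument as SD
#   from couchbase.cluster import Cluster
#   from couchbase.bucket import Bucket
#   from couchbase.n1ql import N1QLQuery
#   from couchbase.exceptions import NotFoundError, KeyExistsError, CouchbaseTransientError
#   from instana.singletons import tracer
# plus `testenv`, `get_first_span_by_name`, and `get_first_span_by_filter` from the
# surrounding test suite.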
class TestStandardCouchDB(unittest.TestCase):
    def setup_class(self):
        """ Clear all spans before a test run """
        self.recorder = tracer.recorder
        self.cluster = Cluster('couchbase://%s' % testenv['couchdb_host'])
        self.bucket = Bucket('couchbase://%s/travel-sample' %
                             testenv['couchdb_host'],
                             username=testenv['couchdb_username'],
                             password=testenv['couchdb_password'])

    def setup_method(self):
        self.bucket.upsert('test-key', 1)
        time.sleep(0.5)
        self.recorder.clear_spans()

    def test_vanilla_get(self):
        res = self.bucket.get("test-key")
        assert (res)

    def test_pipeline(self):
        pass

    def test_upsert(self):
        res = None
        with tracer.start_active_span('test'):
            res = self.bucket.upsert("test_upsert", 1)

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'upsert')

    def test_upsert_multi(self):
        res = None

        kvs = dict()
        kvs['first_test_upsert_multi'] = 1
        kvs['second_test_upsert_multi'] = 1

        with tracer.start_active_span('test'):
            res = self.bucket.upsert_multi(kvs)

        assert (res)
        self.assertTrue(res['first_test_upsert_multi'].success)
        self.assertTrue(res['second_test_upsert_multi'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'upsert_multi')

    def test_insert_new(self):
        res = None
        try:
            self.bucket.remove('test_insert_new')
        except NotFoundError:
            pass

        with tracer.start_active_span('test'):
            res = self.bucket.insert("test_insert_new", 1)

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'insert')

    def test_insert_existing(self):
        res = None
        try:
            self.bucket.insert("test_insert", 1)
        except KeyExistsError:
            pass

        try:
            with tracer.start_active_span('test'):
                res = self.bucket.insert("test_insert", 1)
        except KeyExistsError:
            pass

        self.assertIsNone(res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertEqual(cb_span.ec, 1)
        # Just search for the substring of the exception class
        found = cb_span.data["couchbase"]["error"].find("_KeyExistsError")
        self.assertFalse(found == -1, "Error substring not found.")

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'insert')

    def test_insert_multi(self):
        res = None

        kvs = dict()
        kvs['first_test_upsert_multi'] = 1
        kvs['second_test_upsert_multi'] = 1

        try:
            self.bucket.remove('first_test_upsert_multi')
            self.bucket.remove('second_test_upsert_multi')
        except NotFoundError:
            pass

        with tracer.start_active_span('test'):
            res = self.bucket.insert_multi(kvs)

        assert (res)
        self.assertTrue(res['first_test_upsert_multi'].success)
        self.assertTrue(res['second_test_upsert_multi'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'insert_multi')

    def test_replace(self):
        res = None
        try:
            self.bucket.insert("test_replace", 1)
        except KeyExistsError:
            pass

        with tracer.start_active_span('test'):
            res = self.bucket.replace("test_replace", 2)

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'replace')

    def test_replace_non_existent(self):
        res = None

        try:
            self.bucket.remove("test_replace")
        except NotFoundError:
            pass

        try:
            with tracer.start_active_span('test'):
                res = self.bucket.replace("test_replace", 2)
        except NotFoundError:
            pass

        self.assertIsNone(res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertEqual(cb_span.ec, 1)
        # Just search for the substring of the exception class
        found = cb_span.data["couchbase"]["error"].find("NotFoundError")
        self.assertFalse(found == -1, "Error substring not found.")

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'replace')

    def test_replace_multi(self):
        res = None

        kvs = dict()
        kvs['first_test_replace_multi'] = 1
        kvs['second_test_replace_multi'] = 1

        self.bucket.upsert('first_test_replace_multi', "one")
        self.bucket.upsert('second_test_replace_multi', "two")

        with tracer.start_active_span('test'):
            res = self.bucket.replace_multi(kvs)

        assert (res)
        self.assertTrue(res['first_test_replace_multi'].success)
        self.assertTrue(res['second_test_replace_multi'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'replace_multi')

    def test_append(self):
        self.bucket.upsert("test_append", "one")

        res = None
        with tracer.start_active_span('test'):
            res = self.bucket.append("test_append", "two")

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'append')

    def test_append_multi(self):
        res = None

        kvs = dict()
        kvs['first_test_append_multi'] = "ok1"
        kvs['second_test_append_multi'] = "ok2"

        self.bucket.upsert('first_test_append_multi', "one")
        self.bucket.upsert('second_test_append_multi', "two")

        with tracer.start_active_span('test'):
            res = self.bucket.append_multi(kvs)

        assert (res)
        self.assertTrue(res['first_test_append_multi'].success)
        self.assertTrue(res['second_test_append_multi'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'append_multi')

    def test_prepend(self):
        self.bucket.upsert("test_prepend", "one")

        res = None
        with tracer.start_active_span('test'):
            res = self.bucket.prepend("test_prepend", "two")

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'prepend')

    def test_prepend_multi(self):
        res = None

        kvs = dict()
        kvs['first_test_prepend_multi'] = "ok1"
        kvs['second_test_prepend_multi'] = "ok2"

        self.bucket.upsert('first_test_prepend_multi', "one")
        self.bucket.upsert('second_test_prepend_multi', "two")

        with tracer.start_active_span('test'):
            res = self.bucket.prepend_multi(kvs)

        assert (res)
        self.assertTrue(res['first_test_prepend_multi'].success)
        self.assertTrue(res['second_test_prepend_multi'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'prepend_multi')

    def test_get(self):
        res = None

        with tracer.start_active_span('test'):
            res = self.bucket.get("test-key")

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'get')

    def test_rget(self):
        res = None

        try:
            with tracer.start_active_span('test'):
                res = self.bucket.rget("test-key", replica_index=None)
        except CouchbaseTransientError:
            pass

        self.assertIsNone(res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertEqual(cb_span.ec, 1)
        # Just search for the substring of the exception class
        found = cb_span.data["couchbase"]["error"].find(
            "CouchbaseTransientError")
        self.assertFalse(found == -1, "Error substring not found.")

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'rget')

    def test_get_not_found(self):
        res = None
        try:
            self.bucket.remove('test_get_not_found')
        except NotFoundError:
            pass

        try:
            with tracer.start_active_span('test'):
                res = self.bucket.get("test_get_not_found")
        except NotFoundError:
            pass

        self.assertIsNone(res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertEqual(cb_span.ec, 1)
        # Just search for the substring of the exception class
        found = cb_span.data["couchbase"]["error"].find("NotFoundError")
        self.assertFalse(found == -1, "Error substring not found.")

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'get')

    def test_get_multi(self):
        res = None

        self.bucket.upsert('first_test_get_multi', "one")
        self.bucket.upsert('second_test_get_multi', "two")

        with tracer.start_active_span('test'):
            res = self.bucket.get_multi(
                ['first_test_get_multi', 'second_test_get_multi'])

        assert (res)
        self.assertTrue(res['first_test_get_multi'].success)
        self.assertTrue(res['second_test_get_multi'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'get_multi')

    def test_touch(self):
        res = None
        self.bucket.upsert("test_touch", 1)

        with tracer.start_active_span('test'):
            res = self.bucket.touch("test_touch")

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'touch')

    def test_touch_multi(self):
        res = None

        self.bucket.upsert('first_test_touch_multi', "one")
        self.bucket.upsert('second_test_touch_multi', "two")

        with tracer.start_active_span('test'):
            res = self.bucket.touch_multi(
                ['first_test_touch_multi', 'second_test_touch_multi'])

        assert (res)
        self.assertTrue(res['first_test_touch_multi'].success)
        self.assertTrue(res['second_test_touch_multi'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'touch_multi')

    def test_lock(self):
        res = None
        self.bucket.upsert("test_lock_unlock", "lock_this")

        with tracer.start_active_span('test'):
            rv = self.bucket.lock("test_lock_unlock", ttl=5)
            assert (rv)
            self.assertTrue(rv.success)

            # upsert automatically unlocks the key
            res = self.bucket.upsert("test_lock_unlock", "updated", rv.cas)
            assert (res)
            self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(3, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        filter = lambda span: span.n == "couchbase" and span.data["couchbase"][
            "type"] == "lock"
        cb_lock_span = get_first_span_by_filter(spans, filter)
        assert (cb_lock_span)

        filter = lambda span: span.n == "couchbase" and span.data["couchbase"][
            "type"] == "upsert"
        cb_upsert_span = get_first_span_by_filter(spans, filter)
        assert (cb_upsert_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_lock_span.t)
        self.assertEqual(test_span.t, cb_upsert_span.t)

        self.assertEqual(cb_lock_span.p, test_span.s)
        self.assertEqual(cb_upsert_span.p, test_span.s)

        assert (cb_lock_span.stack)
        self.assertIsNone(cb_lock_span.ec)
        assert (cb_upsert_span.stack)
        self.assertIsNone(cb_upsert_span.ec)

        self.assertEqual(cb_lock_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_lock_span.data["couchbase"]["bucket"],
                         'travel-sample')
        self.assertEqual(cb_lock_span.data["couchbase"]["type"], 'lock')
        self.assertEqual(cb_upsert_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_upsert_span.data["couchbase"]["bucket"],
                         'travel-sample')
        self.assertEqual(cb_upsert_span.data["couchbase"]["type"], 'upsert')

    def test_lock_unlock(self):
        res = None
        self.bucket.upsert("test_lock_unlock", "lock_this")

        with tracer.start_active_span('test'):
            rv = self.bucket.lock("test_lock_unlock", ttl=5)
            assert (rv)
            self.assertTrue(rv.success)

            # unlock the key explicitly using the CAS returned by lock()
            res = self.bucket.unlock("test_lock_unlock", rv.cas)
            assert (res)
            self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(3, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        filter = lambda span: span.n == "couchbase" and span.data["couchbase"][
            "type"] == "lock"
        cb_lock_span = get_first_span_by_filter(spans, filter)
        assert (cb_lock_span)

        filter = lambda span: span.n == "couchbase" and span.data["couchbase"][
            "type"] == "unlock"
        cb_unlock_span = get_first_span_by_filter(spans, filter)
        assert (cb_unlock_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_lock_span.t)
        self.assertEqual(test_span.t, cb_unlock_span.t)

        self.assertEqual(cb_lock_span.p, test_span.s)
        self.assertEqual(cb_unlock_span.p, test_span.s)

        assert (cb_lock_span.stack)
        self.assertIsNone(cb_lock_span.ec)
        assert (cb_unlock_span.stack)
        self.assertIsNone(cb_unlock_span.ec)

        self.assertEqual(cb_lock_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_lock_span.data["couchbase"]["bucket"],
                         'travel-sample')
        self.assertEqual(cb_lock_span.data["couchbase"]["type"], 'lock')
        self.assertEqual(cb_unlock_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_unlock_span.data["couchbase"]["bucket"],
                         'travel-sample')
        self.assertEqual(cb_unlock_span.data["couchbase"]["type"], 'unlock')

    def test_lock_unlock_multi(self):
        res = None
        self.bucket.upsert("test_lock_unlock_multi_1", "lock_this")
        self.bucket.upsert("test_lock_unlock_multi_2", "lock_this")

        keys_to_lock = ("test_lock_unlock_multi_1", "test_lock_unlock_multi_2")

        with tracer.start_active_span('test'):
            rv = self.bucket.lock_multi(keys_to_lock, ttl=5)
            assert (rv)
            self.assertTrue(rv['test_lock_unlock_multi_1'].success)
            self.assertTrue(rv['test_lock_unlock_multi_2'].success)

            res = self.bucket.unlock_multi(rv)
            assert (res)

        spans = self.recorder.queued_spans()
        self.assertEqual(3, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        filter = lambda span: span.n == "couchbase" and span.data["couchbase"][
            "type"] == "lock_multi"
        cb_lock_span = get_first_span_by_filter(spans, filter)
        assert (cb_lock_span)

        filter = lambda span: span.n == "couchbase" and span.data["couchbase"][
            "type"] == "unlock_multi"
        cb_unlock_span = get_first_span_by_filter(spans, filter)
        assert (cb_unlock_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_lock_span.t)
        self.assertEqual(test_span.t, cb_unlock_span.t)

        self.assertEqual(cb_lock_span.p, test_span.s)
        self.assertEqual(cb_unlock_span.p, test_span.s)

        assert (cb_lock_span.stack)
        self.assertIsNone(cb_lock_span.ec)
        assert (cb_unlock_span.stack)
        self.assertIsNone(cb_unlock_span.ec)

        self.assertEqual(cb_lock_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_lock_span.data["couchbase"]["bucket"],
                         'travel-sample')
        self.assertEqual(cb_lock_span.data["couchbase"]["type"], 'lock_multi')
        self.assertEqual(cb_unlock_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_unlock_span.data["couchbase"]["bucket"],
                         'travel-sample')
        self.assertEqual(cb_unlock_span.data["couchbase"]["type"],
                         'unlock_multi')

    def test_remove(self):
        res = None
        self.bucket.upsert("test_remove", 1)

        with tracer.start_active_span('test'):
            res = self.bucket.remove("test_remove")

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'remove')

    def test_remove_multi(self):
        res = None
        self.bucket.upsert("test_remove_multi_1", 1)
        self.bucket.upsert("test_remove_multi_2", 1)

        keys_to_remove = ("test_remove_multi_1", "test_remove_multi_2")

        with tracer.start_active_span('test'):
            res = self.bucket.remove_multi(keys_to_remove)

        assert (res)
        self.assertTrue(res['test_remove_multi_1'].success)
        self.assertTrue(res['test_remove_multi_2'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'remove_multi')

    def test_counter(self):
        res = None
        self.bucket.upsert("test_counter", 1)

        with tracer.start_active_span('test'):
            res = self.bucket.counter("test_counter", delta=10)

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'counter')

    def test_counter_multi(self):
        res = None
        self.bucket.upsert("first_test_counter", 1)
        self.bucket.upsert("second_test_counter", 1)

        with tracer.start_active_span('test'):
            res = self.bucket.counter_multi(
                ("first_test_counter", "second_test_counter"))

        assert (res)
        self.assertTrue(res['first_test_counter'].success)
        self.assertTrue(res['second_test_counter'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'counter_multi')

    def test_mutate_in(self):
        res = None
        self.bucket.upsert(
            'king_arthur', {
                'name': 'Arthur',
                'email': '*****@*****.**',
                'interests': ['Holy Grail', 'African Swallows']
            })

        with tracer.start_active_span('test'):
            res = self.bucket.mutate_in(
                'king_arthur', SD.array_addunique('interests', 'Cats'),
                SD.counter('updates', 1))

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'mutate_in')

    def test_lookup_in(self):
        res = None
        self.bucket.upsert(
            'king_arthur', {
                'name': 'Arthur',
                'email': '*****@*****.**',
                'interests': ['Holy Grail', 'African Swallows']
            })

        with tracer.start_active_span('test'):
            res = self.bucket.lookup_in('king_arthur', SD.get('email'),
                                        SD.get('interests'))

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'lookup_in')

    def test_stats(self):
        res = None

        with tracer.start_active_span('test'):
            res = self.bucket.stats()

        assert (res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'stats')

    def test_ping(self):
        res = None

        with tracer.start_active_span('test'):
            res = self.bucket.ping()

        assert (res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'ping')

    def test_diagnostics(self):
        res = None

        with tracer.start_active_span('test'):
            res = self.bucket.diagnostics()

        assert (res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'diagnostics')

    def test_observe(self):
        res = None
        self.bucket.upsert('test_observe', 1)

        with tracer.start_active_span('test'):
            res = self.bucket.observe('test_observe')

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'observe')

    def test_observe_multi(self):
        res = None
        self.bucket.upsert('test_observe_multi_1', 1)
        self.bucket.upsert('test_observe_multi_2', 1)

        keys_to_observe = ('test_observe_multi_1', 'test_observe_multi_2')

        with tracer.start_active_span('test'):
            res = self.bucket.observe_multi(keys_to_observe)

        assert (res)
        self.assertTrue(res['test_observe_multi_1'].success)
        self.assertTrue(res['test_observe_multi_2'].success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'observe_multi')

    def test_raw_n1ql_query(self):
        res = None

        with tracer.start_active_span('test'):
            res = self.bucket.n1ql_query("SELECT 1")

        assert (res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'n1ql_query')
        self.assertEqual(cb_span.data["couchbase"]["sql"], 'SELECT 1')

    def test_n1ql_query(self):
        res = None

        with tracer.start_active_span('test'):
            res = self.bucket.n1ql_query(
                N1QLQuery(
                    'SELECT name FROM `travel-sample` WHERE brewery_id ="mishawaka_brewing"'
                ))

        assert (res)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'n1ql_query')
        self.assertEqual(
            cb_span.data["couchbase"]["sql"],
            'SELECT name FROM `travel-sample` WHERE brewery_id ="mishawaka_brewing"'
        )
Example #51
0
#!/usr/bin/env python
from __future__ import print_function

from couchbase.bucket import Bucket
from couchbase.exceptions import NotFoundError

cb = Bucket('couchbase://10.0.0.31/default')

print('Getting non-existent key. Should fail..')
try:
    cb.get('non-exist-document')
except NotFoundError:
    print('Got exception for missing document!')
print('...')

print('Upserting...')
cb.upsert('new_document', {'foo': 'bar'})
print('Getting...')
print(cb.get('new_document').value)
Example #52
0
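# Minimal fetch helper. `localhost` and `cb_bucket` are presumably module-level settings
# defined elsewhere; the snippet also assumes:
#   from couchbase.bucket import Bucket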
def fetch_doc(key):

    cb = Bucket('couchbase://' + localhost + '/' + cb_bucket)
    doc = cb.get(key)
    return doc
Example #53
0
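# SDK client wrapper built on the Couchbase Python SDK (2.x API). This snippet presumably
# relies on imports defined elsewhere in the module, roughly:
#   import time
#   from couchbase.bucket import Bucket as CouchbaseBucket
#   from couchbase.exceptions import CouchbaseError, BucketNotFoundError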
class SDKClient(object):
    """Python SDK Client Implementation for testrunner - master branch Implementation"""

    def __init__(self, bucket, hosts = ["localhost"] , scheme = "couchbase",
                 ssl_path = None, uhm_options = None, password=None,
                 quiet=True, certpath = None, transcoder = None):
        self.connection_string = \
            self._createString(scheme = scheme, bucket = bucket, hosts = hosts,
                               certpath = certpath, uhm_options = uhm_options)
        self.password = password
        self.quiet = quiet
        self.transcoder = transcoder
        self.default_timeout = 0
        self._createConn()

    def _createString(self, scheme ="couchbase", bucket = None, hosts = ["localhost"], certpath = None, uhm_options = ""):
        connection_string = "{0}://{1}".format(scheme, ", ".join(hosts).replace(" ",""))
        if bucket != None:
            connection_string = "{0}/{1}".format(connection_string, bucket)
        if uhm_options != None:
            connection_string = "{0}?{1}".format(connection_string, uhm_options)
        if scheme == "couchbases":
            if "?" in connection_string:
                connection_string = "{0},certpath={1}".format(connection_string, certpath)
            else:
                connection_string = "{0}?certpath={1}".format(connection_string, certpath)
        return connection_string

    def _createConn(self):
        try:
            self.cb = CouchbaseBucket(self.connection_string, password = self.password,
                                  quiet = self.quiet, transcoder = self.transcoder)
            self.default_timeout = self.cb.timeout
        except BucketNotFoundError:
            raise

    def reconnect(self):
        self.cb.close()
        self._createConn()

    def close(self):
        self.cb._close()

    def counter_in(self, key, path, delta, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.counter_in(key, path, delta, create_parents= create_parents, cas= cas, ttl= ttl, persist_to= persist_to, replicate_to= replicate_to)
        except CouchbaseError as e:
            raise

    def arrayappend_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.arrayappend_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def arrayprepend_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.arrayprepend_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def arrayaddunique_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.addunique_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def arrayinsert_in(self, key, path, value, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.arrayinsert_in(key, path, value, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def remove_in(self, key, path,  cas=0, ttl=0):
        try:
            self.cb.remove_in(key, path, cas = cas, ttl = ttl)
        except CouchbaseError as e:
            raise

    def mutate_in(self, key, *specs, **kwargs):
        try:
            self.cb.mutate_in(key, *specs, **kwargs)
        except CouchbaseError as e:
            raise

    def lookup_in(self, key, *specs, **kwargs):
        try:
            self.cb.lookup_in(key, *specs, **kwargs)
        except CouchbaseError as e:
            raise

    def get_in(self, key, path):
        try:
            result = self.cb.get_in(key, path)
            return self.__translate_get(result)
        except CouchbaseError as e:
            raise

    def exists_in(self, key, path):
        try:
            self.cb.exists_in(key, path)
        except CouchbaseError as e:
            raise

    def replace_in(self, key, path, value, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.replace_in(key, path, value, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def insert_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.insert_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def upsert_in(self, key, path, value, create_parents=True, cas=0, ttl=0, persist_to=0, replicate_to=0):
        try:
            return self.cb.upsert_in(key, path, value, create_parents=create_parents, cas=cas, ttl=ttl, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def append(self, key, value, cas=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.append(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.append(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def append_multi(self, keys, cas=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.append_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.append_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def prepend(self, key, value, cas=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.prepend(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.prepend(key, value, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def prepend_multi(self, keys, cas=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.prepend_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.prepend_multi(keys, cas=cas, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def replace(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
           self.cb.replace( key, value, cas=cas, ttl=ttl, format=format,
                                    persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.replace( key, value, cas=cas, ttl=ttl, format=format,
                                    persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def replace_multi(self, keys, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.replace_multi( keys, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.replace_multi( keys, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def cas(self, key, value, cas=0, ttl=0, format=None):
        return self.cb.replace(key, value, cas=cas, ttl=ttl, format=format)

    def delete(self,key, cas=0, quiet=True, persist_to=0, replicate_to=0):
        self.remove(key, cas=cas, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)

    def remove(self,key, cas=0, quiet=True, persist_to=0, replicate_to=0):
        try:
            return self.cb.remove(key, cas=cas, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                return self.cb.remove(key, cas=cas, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def delete_multi(self, keys, quiet=True, persist_to=0, replicate_to=0):
        return self.remove_multi(keys, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)

    def remove_multi(self, keys, quiet=True, persist_to=0, replicate_to=0):
        try:
            self.cb.remove_multi(keys, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.remove_multi(keys, quiet=quiet, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def set(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0):
        return self.upsert(key, value, cas=cas, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)

    def upsert(self, key, value, cas=0, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.upsert(key, value, cas, ttl, format, persist_to, replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.upsert(key, value, cas, ttl, format, persist_to, replicate_to)
            except CouchbaseError as e:
                raise

    def set_multi(self, keys, ttl=0, format=None, persist_to=0, replicate_to=0):
        return self.upsert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)

    def upsert_multi(self, keys, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.upsert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.upsert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def insert(self, key, value, ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.insert(key, value, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.insert(key, value, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def insert_multi(self, keys,  ttl=0, format=None, persist_to=0, replicate_to=0):
        try:
            self.cb.insert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.insert_multi(keys, ttl=ttl, format=format, persist_to=persist_to, replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def touch(self, key, ttl = 0):
        try:
            self.cb.touch(key, ttl=ttl)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.touch(key, ttl=ttl)
            except CouchbaseError as e:
                raise

    def touch_multi(self, keys, ttl = 0):
        try:
            self.cb.touch_multi(keys, ttl=ttl)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.touch_multi(keys, ttl=ttl)
            except CouchbaseError as e:
                raise

    def decr(self, key, delta=1, initial=None, ttl=0):
        self.counter(key, delta=-delta, initial=initial, ttl=ttl)

    def decr_multi(self, keys, delta=1, initial=None, ttl=0):
        self.counter_multi(keys, delta=-delta, initial=initial, ttl=ttl)

    def incr(self, key, delta=1, initial=None, ttl=0):
        self.counter(key, delta=delta, initial=initial, ttl=ttl)

    def incr_multi(self, keys, delta=1, initial=None, ttl=0):
        self.counter_multi(keys, delta=delta, initial=initial, ttl=ttl)

    def counter(self, key, delta=1, initial=None, ttl=0):
        try:
            self.cb.counter(key, delta=delta, initial=initial, ttl=ttl)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.counter(key, delta=delta, initial=initial, ttl=ttl)
            except CouchbaseError as e:
                raise

    def counter_multi(self, keys, delta=1, initial=None, ttl=0):
        try:
            self.cb.counter_multi(keys, delta=delta, initial=initial, ttl=ttl)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.counter_multi(keys, delta=delta, initial=initial, ttl=ttl)
            except CouchbaseError as e:
                raise

    def get(self, key, ttl=0, quiet=True, replica=False, no_format=False):
        try:
            rv = self.cb.get(key, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format)
            return self.__translate_get(rv)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                rv = self.cb.get(key, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format)
                return self.__translate_get(rv)
            except CouchbaseError as e:
                raise

    def rget(self, key, replica_index=None, quiet=True):
        try:
            data = self.cb.rget(key, replica_index=replica_index, quiet=quiet)
            return self.__translate_get(data)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                data = self.cb.rget(key, replica_index=replica_index, quiet=quiet)
                return self.__translate_get(data)
            except CouchbaseError as e:
                raise

    def get_multi(self, keys, ttl=0, quiet=True, replica=False, no_format=False):
        try:
            data = self.cb.get_multi(keys, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format)
            return self.__translate_get_multi(data)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                data = self.cb.get_multi(keys, ttl=ttl, quiet=quiet, replica=replica, no_format=no_format)
                return self.__translate_get_multi(data)
            except CouchbaseError as e:
                raise

    def rget_multi(self, keys, replica_index=None, quiet=True):
        try:
            data = self.cb.rget_multi(keys, replica_index=replica_index, quiet=quiet)
            return self.__translate_get_multi(data)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                data = self.cb.rget_multi(keys, replica_index=replica_index, quiet=quiet)
                return self.__translate_get_multi(data)
            except CouchbaseError as e:
                raise

    def stats(self, keys=None):
        try:
            stat_map = self.cb.stats(keys = keys)
            return stat_map
        except CouchbaseError as e:
            try:
                time.sleep(10)
                return self.cb.stats(keys = keys)
            except CouchbaseError as e:
                raise

    def errors(self, clear_existing=True):
        try:
            rv = self.cb.errors(clear_existing = clear_existing)
            return rv
        except CouchbaseError as e:
            raise

    def observe(self, key, master_only=False):
        try:
            return self.cb.observe(key, master_only = master_only)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                return self.cb.observe(key, master_only = master_only)
            except CouchbaseError as e:
                raise

    def observe_multi(self, keys, master_only=False):
        try:
            data = self.cb.observe_multi(keys, master_only = master_only)
            return self.__translate_observe_multi(data)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                data = self.cb.observe_multi(keys, master_only = master_only)
                return self.__translate_observe_multi(data)
            except CouchbaseError as e:
                raise

    def endure(self, key, persist_to=-1, replicate_to=-1, cas=0, check_removed=False, timeout=5.0, interval=0.010):
        try:
            self.cb.endure(key, persist_to=persist_to, replicate_to=replicate_to,
                           cas=cas, check_removed=check_removed, timeout=timeout, interval=interval)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.endure(key, persist_to=persist_to, replicate_to=replicate_to,
                    cas=cas, check_removed=check_removed, timeout=timeout, interval=interval)
            except CouchbaseError as e:
                raise

    def endure_multi(self, keys, persist_to=-1, replicate_to=-1, cas=0, check_removed=False, timeout=5.0, interval=0.010):
        try:
            # endure_multi takes CAS values from the Result objects passed in `keys`
            self.cb.endure_multi(keys, persist_to=persist_to, replicate_to=replicate_to,
                                 check_removed=check_removed, timeout=timeout, interval=interval)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.endure_multi(keys, persist_to=persist_to, replicate_to=replicate_to,
                                     check_removed=check_removed, timeout=timeout, interval=interval)
            except CouchbaseError as e:
                raise

    def lock(self, key, ttl=0):
        try:
            data = self.cb.lock(key, ttl = ttl)
            return self.__translate_get(data)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                data = self.cb.lock(key, ttl = ttl)
                return self.__translate_get(data)
            except CouchbaseError as e:
                raise

    def lock_multi(self, keys, ttl=0):
        try:
            data = self.cb.lock_multi(keys, ttl = ttl)
            return self.__translate_get_multi(data)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                data = self.cb.lock_multi(keys, ttl = ttl)
                return self.__translate_get_multi(data)
            except CouchbaseError as e:
                raise

    def unlock(self, key, ttl=0):
        try:
            return self.cb.unlock(key)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                return self.cb.unlock(key)
            except CouchbaseError as e:
                raise

    def unlock_multi(self, keys):
        try:
            return self.cb.unlock_multi(keys)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                return self.cb.unlock_multi(keys)
            except CouchbaseError as e:
                raise

    def n1ql_query(self, statement, prepared=False):
        try:
            return N1QLQuery(statement, prepared)
        except CouchbaseError as e:
            raise

    def n1ql_request(self, query):
        try:
            return N1QLRequest(query, self.cb)
        except CouchbaseError as e:
            raise

    def __translate_get_multi(self, data):
        result_map = {}
        if data is None:
            return result_map
        for key, result in data.items():
            result_map[key] = [result.flags, result.cas, result.value]
        return result_map

    def __translate_get(self, data):
        return data.flags, data.cas, data.value

    def __translate_delete(self, data):
        return data

    def __translate_observe(self, data):
        return data

    def __translate_observe_multi(self, data):
        result_map = {}
        if data is None:
            return result_map
        for key, result in data.items():
            result_map[key] = result.value
        return result_map

    def __translate_upsert_multi(self, data):
        result_map = {}
        if data is None:
            return result_map
        for key, result in data.items():
            result_map[key] = result
        return result_map

    def __translate_upsert_op(self, data):
        return data.rc, data.success, data.errstr, data.key
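Nearly every mutating wrapper above repeats the same shape: call the SDK, and on CouchbaseError wait ten seconds and try exactly once more before re-raising. A minimal sketch of that pattern factored into a decorator; the decorator name and the usage shown in the trailing comment are illustrative, not part of the original class:

import time
from functools import wraps

from couchbase.exceptions import CouchbaseError


def retry_once_on_couchbase_error(delay=10):
    """Retry a Couchbase SDK call one more time after `delay` seconds on CouchbaseError."""
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except CouchbaseError:
                time.sleep(delay)
                return func(*args, **kwargs)  # re-raises if the retry also fails
        return wrapper
    return decorator


# Hypothetical usage inside the wrapper class above:
#
#     @retry_once_on_couchbase_error()
#     def upsert(self, key, value, **kwargs):
#         return self.cb.upsert(key, value, **kwargs)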
예제 #54
0
from couchbase.bucket import Bucket
from couchbase.exceptions import KeyExistsError


# Connect to the default bucket on local host
cb = Bucket('couchbase://127.0.0.1/default')

# If you want to store the Python objects pickled and not as JSON
#cb.default_format = FMT_PICKLE

# Store a document
rv = cb.upsert('first', {'hello': 'world'})
cas = rv.cas
print(rv)

# Get the document
item = cb.get('first')
print(item)

# Overwrite the existing document only if the CAS value matched
try:
    # An exception will be raised if the CAS doesn't match
    wrong_cas = cas + 123
    cb.upsert('first', {'hello': 'world', 'additional': True}, cas=wrong_cas)
except KeyExistsError:
    # Get the correct current CAS value
    rv = cb.get('first')
    item, flags, correct_cas = rv.value, rv.flags, rv.cas
    # Set it again, this time with the correct CAS value
    rv = cb.upsert('first',
                   {'hello': 'world', 'additional': True},
                   cas=correct_cas)
예제 #55
0
파일: eql.py 프로젝트: yigitbasalma/EQL
class EQL(Db):
    def __init__(self,
                 logger,
                 router_mod_statistic=False,
                 router_mod=False,
                 watcher=False,
                 clustered=False,
                 with_static=False):
        self.logger = logger
        self.config = ConfigParser.ConfigParser()
        if router_mod:
            if watcher or clustered or with_static:
                raise RuntimeError(
                    "Other modes cannot be used while router_mod is enabled.")
            self.config.read("/EQL/source/cdn.cfg")
            self.edge_locations = self.config.get("env",
                                                  "edge_locations").split(",")
            self.default_edge = self.config.get("env", "default_edge")
            continent_db = self.config.get("env", "continent_db")
            lb_db = self.config.get("env", "lb_db")
            self.cc_db = Db(continent_db)
            if os.path.exists(lb_db): os.remove(lb_db)
            Db.__init__(self, lb_db)
            self.write(
                "CREATE TABLE edge_status(SERVER VARCHAR(200) PRIMARY KEY,STATUS VARCHAR(50), REGION VARCHAR(5))"
            )
            check_interval = int(self.config.get("env", "edge_check_interval"))
            p = Process(target=self._health_check_edge_server,
                        name="EQL_Watcher",
                        kwargs={"check_interval": check_interval})
            p.start()
            self.router_mod = True
            self.with_statistic = bool(router_mod_statistic)
            if router_mod_statistic:
                self.request_statistic = Bucket("couchbase://{0}/{1}".format(
                    self.config.get("env", "cbhost"),
                    self.config.get("env", "statistic_bucket")),
                                                lockmode=2)
        if not router_mod:
            if router_mod_statistic:
                raise RuntimeError(
                    "This feature can only be used together with router_mod."
                )
            self.router_mod = False
            self.config.read("/EQL/source/config.cfg")
            self.cache_bucket = Bucket("couchbase://{0}/{1}".format(
                self.config.get("env", "cbhost"),
                self.config.get("env", "cache_bucket")),
                                       lockmode=2)
            self.statistic_bucket = Bucket("couchbase://{0}/{1}".format(
                self.config.get("env", "cbhost"),
                self.config.get("env", "statistic_bucket")),
                                           lockmode=2)
            self.server = self.config.get("env", "server")
            self.clustered = clustered
            self.timeout = float(self.config.get("env", "timeout"))
            self.img_file_expire = int(
                self.config.get("env", "img_file_expire")) * 24 * 60 * 60
        if clustered:
            lb_db = self.config.get("env", "lb_db")
            if os.path.exists(lb_db): os.remove(lb_db)
            Db.__init__(self, lb_db)
            self.write(
                "CREATE TABLE lb(HOST VARCHAR(100) PRIMARY KEY, STATUS VARCHAR(20), WEIGHT INT(3) DEFAULT '0')"
            )
            self.clustered = True
            self._health_check_cluster(first=True)
        if with_static:
            self.mime_type = {
                "css": "text/css",
                "js": "application/javascript"
            }
            self.root_directory = str(self.config.get("env", "root_directory"))
            self.static_file_expire = int(
                self.config.get("env", "static_file_expire")) * 24 * 60 * 60
        if watcher:
            if not clustered:
                raise RuntimeError(
                    "watcher cannot be used unless the clustered parameter is enabled.")
            check_interval = int(self.config.get("env", "check_interval"))
            p = Process(target=self._health_check_cluster,
                        name="EQL_Watcher",
                        kwargs={"check_interval": check_interval})
            p.start()

    def _health_check_cluster(self, first=False, check_interval=3):
        if first:
            cluster = self.config.get("env", "cluster").split(",")
            cluster.append(self.server)
            url = self.config.get("env", "health_check_url")
            weight = 1
            for server in cluster:
                status = None
                try:
                    req = requests.get("http://{0}{1}".format(server, url),
                                       timeout=self.timeout)
                    status = "up" if req.status_code == 200 else "down"
                except requests.exceptions.Timeout:
                    status = "down"
                except requests.exceptions.ConnectionError:
                    status = "down"
                finally:
                    if status == "down":
                        self.logger.log_save(
                            "EQL", "ERROR",
                            "{0} Sunucusu down.".format(server))
                    self.write(
                        "INSERT INTO lb VALUES ('{0}', '{1}', '{2}')".format(
                            server, status, weight))
                    weight += 1
            return True

        while True:
            cluster = [i[0] for i in self.readt("SELECT HOST FROM lb")]
            url = self.config.get("env", "health_check_url")
            weight = 1
            for server in cluster:
                status = None
                try:
                    req = requests.get("http://{0}{1}".format(server, url),
                                       timeout=self.timeout)
                    status = "up" if req.status_code == 200 else "down"
                except requests.exceptions.Timeout:
                    status = "down"
                except requests.exceptions.ConnectionError:
                    status = "down"
                finally:
                    try:
                        if status == "down":
                            self.logger.log_save(
                                "EQL", "ERROR",
                                "{0} Sunucusu down.".format(server))
                        self.write(
                            "INSERT INTO lb VALUES ('{0}', '{1}', '{2}')".
                            format(server, status, weight))
                    except sqlite3.IntegrityError:
                        self.write(
                            "UPDATE lb SET STATUS='{0}', WEIGHT='{1}' WHERE HOST='{2}'"
                            .format(status, weight, server))
                    weight += 1
            time.sleep(int(check_interval))

    def _is_cached(self, url):
        urls = h.md5(url).hexdigest()
        try:
            values = self.cache_bucket.get(urls).value
            type_ = self._statistic(urls, r_turn=True)
            if type_ is None:
                raise ValueError()
            return True, values, type_
        except (couchbase.exceptions.NotFoundError, ValueError):
            try:
                req = requests.get("http://{0}/{1}".format(self.server, url),
                                   timeout=self.timeout)
            except requests.exceptions.Timeout:
                if not self.clustered:
                    self.logger.log_save(
                        "EQL", "CRITIC", "Backend server timeout hatası aldı.")
                    return False, int(500)
                while True:
                    pool = self._get_server()
                    try:
                        try:
                            self.server = pool.next()
                            req = requests.get("http://{0}/{1}".format(
                                self.server, url),
                                               timeout=self.timeout)
                            if req.status_code == 200: break
                        except (requests.exceptions.Timeout,
                                requests.exceptions.ConnectionError):
                            pass
                    except StopIteration:
                        self.logger.log_save(
                            "EQL", "CRITIC",
                            "Tüm backend serverlar timeout hatası aldı.")
                        return False, int(500)
            if req.status_code == 200:
                self._cache_item(urls, req.content)
                self._statistic(urls, req.headers.get('content-type'))
                return True, req.content, req.headers.get('content-type')
            else:
                return False, int(req.status_code)

    def _cache_item(self, url, img, static_file=False):
        try:
            if static_file:
                self.cache_bucket.insert(url,
                                         img,
                                         format=couchbase.FMT_BYTES,
                                         ttl=self.static_file_expire)
            else:
                self.cache_bucket.insert(url,
                                         img,
                                         format=couchbase.FMT_BYTES,
                                         ttl=self.img_file_expire)
        except couchbase.exceptions.KeyExistsError:
            pass
        finally:
            return True

    def _statistic(self, url, type_=None, r_turn=False, static_file=False):
        if static_file:
            expire = self.static_file_expire
        else:
            expire = self.img_file_expire
        try:
            values = self.statistic_bucket.get(url).value
            count, timestamp, type_ = values[0], values[1], values[2]
            count += 1
            obj = [count, timestamp, type_]
            self.statistic_bucket.replace(url, obj)
        except couchbase.exceptions.NotFoundError:
            if r_turn:
                if type_ is None:
                    return False
            count = 1
            obj = [
                count,
                datetime.datetime.now().strftime("%Y-%m-%d %H:%I:%S"), type_
            ]
            self.statistic_bucket.insert(url, obj, ttl=int(expire))
        finally:
            if r_turn:
                return type_
            return True

    def _get_server(self):
        cluster = self.readt(
            "SELECT HOST,WEIGHT FROM lb WHERE STATUS='up' ORDER BY WEIGHT ASC")
        itr = 1
        while len(cluster) >= itr:
            yield cluster[itr - 1][0]
            itr += 1

    def route_request(self, url, from_file=False):
        # return status, data, mime type
        if self.router_mod:
            raise RuntimeError("router_mod açıkken bu özellik kullanılamaz.")
        if from_file:
            urls = h.md5(url).hexdigest()
            try:
                values = self.cache_bucket.get(urls).value
                type_ = self._statistic(urls, r_turn=True)
                return True, values, type_
            except couchbase.exceptions.NotFoundError:
                try:
                    file_ = open(self.root_directory + str(url))
                except IOError:
                    return False, int(500)
                data = file_.read()
                type_ = self.mime_type[url.split(".")[-1]]
                self._cache_item(urls, data, static_file=True)
                self._statistic(urls, type_, static_file=True)
                return True, data, type_

        return self._is_cached(url)

    # start of router_mod operations

    def _health_check_edge_server(self, check_interval=3):
        while True:
            for edge_location in self.edge_locations:
                health_check_url = self.config.get(edge_location,
                                                   "health_check_url")
                timeout = self.config.get(edge_location, "timeout")
                cluster = self.config.get(edge_location, "servers").split(",")
                for server in cluster:
                    status = None
                    try:
                        req = requests.get("http://{0}{1}".format(
                            server, health_check_url),
                                           timeout=float(timeout))
                        status = "up" if req.status_code == 200 else "down"
                    except requests.exceptions.Timeout:
                        status = "down"
                    except requests.exceptions.ConnectionError:
                        status = "down"
                    finally:
                        try:
                            if status == "down":
                                self.logger.log_save(
                                    "EQL", "ERROR",
                                    "{0} Sunucusu down.".format(server))
                            self.write(
                                "INSERT INTO edge_status VALUES ('{0}', '{1}', '{2}')"
                                .format(server, status, edge_location))
                        except sqlite3.IntegrityError:
                            self.write(
                                "UPDATE edge_status SET STATUS='{0}', REGION='{2}' WHERE SERVER='{1}'"
                                .format(status, server, edge_location))
            time.sleep(int(check_interval))

    def _put_statistic(self, country_code, url):
        urls = h.md5(url).hexdigest()
        try:
            values = self.request_statistic.get(urls).value
            count, timestamp, countries = values[0], values[1], list(values[2])
            countries.append(country_code)
            countries = list(set(countries))
            count += 1
            obj = [count, timestamp, countries]
            self.request_statistic.replace(urls, obj)
        except couchbase.exceptions.NotFoundError:
            count = 1
            countries = [country_code]
            obj = [
                count,
                datetime.datetime.now().strftime("%Y-%m-%d %H:%I:%S"),
                countries
            ]
            self.request_statistic.insert(urls, obj)
        finally:
            return True

    def _get_best_edge(self, country_code):
        request_from = self.cc_db.readt(
            "SELECT CONTINENT FROM country_code WHERE CC='{0}'".format(
                country_code))[0][0]
        region = request_from if request_from in self.edge_locations else self.default_edge
        return self.readt(
            "SELECT SERVER FROM edge_status WHERE STATUS='up' AND REGION='{0}'"
            .format(region))[0][0]

    def route_to_best_edge(self, url, origin_ip):
        origin = geolite2.lookup(origin_ip)
        if self.with_statistic:
            country = origin.country if origin is not None else None
            self._put_statistic(country, url)
        if origin is not None:
            return True, "http://{0}/{1}".format(
                self._get_best_edge(origin.country), url)
        return True, "http://{0}/{1}".format(
            self._get_best_edge(self.default_edge), url)
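
A hedged instantiation sketch for the class above, assuming the /EQL/source config files are in place and a logger object exposing log_save; the logger name and the URL below are illustrative:

# Minimal sketch, not from the original project: run EQL as a clustered
# cache front-end with the background health-check watcher enabled.
eql = EQL(logger=my_logger, watcher=True, clustered=True, with_static=True)

result = eql.route_request("images/logo.png")
if result[0]:
    ok, data, mime_type = result   # cache hit or freshly fetched content
else:
    ok, status_code = result       # backend failure, e.g. (False, 500)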
예제 #56
0
from typing import Type

from couchbase.bucket import Bucket
from pydantic import BaseModel  # assumed: doc_model is a pydantic-style model class


def get_doc(bucket: Bucket, *, doc_id: str, doc_model: Type[BaseModel]):
    result = bucket.get(doc_id, quiet=True)
    if not result.value:
        return None
    model = doc_model(**result.value)
    return model
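
A short usage sketch for get_doc, assuming a pydantic-style doc_model as imported above; the model fields, document id, and bucket address are made up for illustration:

class UserDoc(BaseModel):  # hypothetical document model
    name: str = ""
    email: str = ""


bucket = Bucket("couchbase://localhost/default")  # illustrative connection string
user = get_doc(bucket, doc_id="userdoc::demo", doc_model=UserDoc)
if user is None:
    print("document not found")
else:
    print(user.name, user.email)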
예제 #57
0
    def create_bucket(self, name, ram_quota_mb=1024):
        """
        1. Create CBS bucket via REST
        2. Create client connection and poll until bucket is available
           Catch all connection exception and break when KeyNotFound error is thrown
        3. Verify all server nodes are in a 'healthy' state before proceeding

        Followed the docs below that suggested this approach.
        http://docs.couchbase.com/admin/admin/REST/rest-bucket-create.html
        """

        log_info("Creating bucket {} with RAM {}".format(name, ram_quota_mb))

        server_version = get_server_version(self.host, self.cbs_ssl)
        server_major_version = int(server_version.split(".")[0])

        data = {
            "name": name,
            "ramQuotaMB": str(ram_quota_mb),
            "authType": "sasl",
            "bucketType": "couchbase",
            "flushEnabled": "1"
        }

        if server_major_version <= 4:
            # Create a bucket with password for server_major_version < 5
            # proxyPort should not be passed for 5.0.0 onwards for bucket creation
            data["saslPassword"] = "******"
            data["proxyPort"] = "11211"

        resp = None
        try:
            resp = self._session.post("{}/pools/default/buckets".format(
                self.url),
                                      data=data)
            log_r(resp)
            resp.raise_for_status()
        except HTTPError as h:
            log_info("resp code: {}; resp text: {}; error: {}".format(
                resp, resp.json(), h))
            raise

        # Create a user with username=bucketname
        if server_major_version >= 5:
            self._create_internal_rbac_bucket_user(name)

        # Create a client and retry until a NotFoundError is thrown
        start = time.time()
        while True:

            if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
                raise Exception(
                    "TIMEOUT while trying to create server buckets.")
            try:
                bucket = Bucket("couchbase://{}/{}".format(self.host, name),
                                password='******')
                bucket.get('foo')
            except NotFoundError:
                log_info("Key not found error: Bucket is ready!")
                break
            except CouchbaseError as e:
                log_info("Error from server: {}, Retrying ...".format(e))
                time.sleep(1)
                continue

        self.wait_for_ready_state()

        return name
예제 #58
0
def load_as_separate_docs(bucketname, count, limit):
    bucket_name = bucketname

    print("Connecting to postgres")
    conn = psycopg2.connect(
        "dbname='musicbrainz_db' user='******' host='localhost' password='******'"
    )
    curr = conn.cursor()

    print("Fetching all tracks")
    curr.execute(
        "select id, gid, artist_credit, name, length, comment, edits_pending, "
        "last_updated from recording offset {} limit {}".format(count, limit))
    rows = curr.fetchall()

    print("Connecting to Couchbase")
    cb = Bucket("couchbase://{}/{}?operation_timeout=10".format(
        cb_url, bucket_name),
                password="")

    print("Loading data")
    key_counter = count
    for row in rows:
        document = {}
        subdocument = {}

        document["id"] = row[0]
        document["gid"] = row[1]
        document["artist"] = None
        document["name"] = row[3]
        document["length"] = row[4]
        document["comment"] = row[5]
        document["edits_pending"] = not row[6] == 0
        document["last_updated"] = str(row[7])

        id = row[2]
        query = "select artist_credit_name.artist from artist_credit_name, artist_credit" \
                " where artist_credit_name.artist_credit = artist_credit.id " \
                "and artist_credit.id = {} limit 1".format(id)

        curr.execute(query)
        credit = curr.fetchall()[0]

        query = "select artist.name, artist.sort_name, artist.begin_date_year, artist.begin_date_month, " \
                "artist.begin_date_day, artist.end_date_year, artist.end_date_month, artist.end_date_day, " \
                "artist.comment, artist.edits_pending, artist.last_updated, artist.id " \
                "from artist where artist.id = {} limit 1".format(credit[0])

        curr.execute(query)
        artist = curr.fetchall()[0]
        words = artist[0].split(' ')  # split the artist name into words
        if len(words) > 10:
            words = words[:9]
            artist_key = "{}:".format(artist[11])
            for word in words:
                artist_key = "{} {}".format(artist_key, word)
        else:
            artist_key = "{}:{}".format(artist[11], artist[0])

        try:
            subdocument = cb.get(artist_key).value
        except NotFoundError:
            subdocument = {}
            subdocument["name"] = artist[0]
            subdocument["sort_name"] = artist[1]
            subdocument["begin_date_year"] = artist[2]
            subdocument["begin_date_month"] = artist[3]
            subdocument["begin_date_day"] = artist[4]
            subdocument["end_date_year"] = artist[5]
            subdocument["end_date_month"] = artist[6]
            subdocument["end_date_day"] = artist[7]
            subdocument["comment"] = artist[8]
            subdocument["edits_pending"] = not artist[9] == 0
            subdocument["last_updated"] = str(artist[10])
            subdocument["id"] = artist[11]
            subdocument["track_id"] = list()

        key = hex(key_counter)[2:]

        document["artist"] = artist_key
        subdocument["track_id"].append(key)

        try:
            cb.upsert(key, document)
            cb.upsert(artist_key, subdocument)
            key_counter += 1

        except Exception:
            print(document)
            print(subdocument)