def _createView(self, designDocument, viewName, mapFunction, reduceFunction):
     """Register (or overwrite) a view in *designDocument* and publish it.

     The reduce function is optional; when it is None only the map function
     is stored.  The design document is pushed synchronously with a 10s
     timeout via design_create.
     """
     bucket = Bucket(self._bucketUrl)
     # `is None` is the idiomatic, identity-safe null check (== None can be
     # fooled by objects overriding __eq__).
     if reduceFunction is None:
         designDocument["views"][viewName] = {"map": mapFunction}
     else:
         designDocument["views"][viewName] = {"map": mapFunction, "reduce": reduceFunction}
     bucket.design_create(self.designDocumentName, designDocument, False, 10000)
Beispiel #2
0
 def get(self, id):
     """Fetch the document stored under *id*; return its value, or None when absent."""
     bucket = Bucket(self._bucketUrl)
     try:
         return bucket.get(id).value
     except NotFoundError:
         # Missing keys are an expected condition, not an error.
         return None
def main():
    """CLI demo: write one document through the mirror, then read it back from
    both Couchbase and memcached to show the stores agree.

    Fix: the two trailing `print` statements used Python 2 statement syntax
    (a SyntaxError on Python 3); the call form below works on both.
    """
    from argparse import ArgumentParser
    import time

    ap = ArgumentParser()
    ap.add_argument('-C', '--couchbase',
                    help='Couchbase connection string',
                    default='couchbase://localhost')
    ap.add_argument('-M', '--memcached',
                    help='List of memcached hosts to use in host:port format',
                    action='append',
                    default=[])
    options = ap.parse_args()

    # Parse "host:port" strings into (host, int(port)) tuples.
    mc_hosts = []
    for server in options.memcached:
        host, port = server.split(':')
        mc_hosts.append((host, int(port)))

    mirror = CouchbaseMemcacheMirror(options.couchbase, mc_hosts)
    value = {
        'entry': 'Mirror value',
        'updated': time.time()
    }
    mirror.set('mirrkey', value)

    # Create individual clients, to demonstrate both backends hold the value.
    cb = CbBucket(options.couchbase)
    print('Value from couchbase: {}'.format(cb.get('mirrkey').value))
    print('Value from Memcached: {}'.format(McClient(mc_hosts).get('mirrkey')))
Beispiel #4
0
def parseAndSave(s):
  """Parse each row of the mp4ba listing table, build a Movie record, and
  upsert it into the local Couchbase bucket keyed by the movie title.

  Rows without a detail link (usually ads) are skipped; rows that fail to
  parse are logged and skipped.

  Fixes: the Bucket was reconnected for every row (hoisted out of the loop);
  `m.source_link` was assigned twice (the later str() form wins, kept);
  the handlers ended with a bare `exit` expression, which merely evaluated
  the builtin without calling it -- a no-op, removed.
  """
  # One connection for the whole page instead of one per row.
  bucket = Bucket('couchbase://localhost/default')
  for l in s.find(id="data_list").findAll("tr"):
    try:
      tagALinkToDetail = l.find(has_link_to_show_php)
      if tagALinkToDetail:
        fullText = tagALinkToDetail.text.strip()
        logger.info("Searching info in douban with keyword: %s" % fullText)

        m = Movie(fullText)
        m.source_link = str(tagALinkToDetail["href"])
        m.numberOfSeeds = int(tagALinkToDetail.parent.parent.find(class_='bts_1').text)
        bucket.upsert(fullText, json.dumps(m.__dict__))
      else:
        logger.debug("skip a line in mp4ba resource page, most likely because it is an ads")
    except AttributeError as ae:
      logger.error(ae)
      logger.error("Error! Skipped when trying to parse: %s" % l)
    except Exception as e:
      logger.error(e)
      logger.error("Error! Skipped when trying to parse: %s" % l)
Beispiel #5
0
 def check_dataloss(self, server, bucket):
     """Scan keys 0..self.num_items in fixed-size batches and report missing ones.

     Keys are regenerated as 20-char zero-padded decimal strings (matching
     how they were written).  A failed multi-get is retried key-by-key so
     the exact missing key and its vBucket id can be recorded.  Returns the
     list of error strings; empty means no data loss detected.
     """
     from couchbase.bucket import Bucket
     from couchbase.exceptions import NotFoundError
     from lib.memcached.helper.data_helper import VBucketAwareMemcached
     bkt = Bucket('couchbase://{0}/{1}'.format(server.ip, bucket.name))
     rest = RestConnection(self.master)
     VBucketAware = VBucketAwareMemcached(rest, bucket.name)
     _, _, _ = VBucketAware.request_map(rest, bucket.name)
     batch_start = 0
     batch_end = 0
     batch_size = 10000
     errors = []
     while self.num_items > batch_end:
         batch_end = batch_start + batch_size
         keys = []
         # Regenerate the zero-padded key names for this batch.
         for i in xrange(batch_start, batch_end, 1):
             keys.append(str(i).rjust(20, '0'))
         try:
             bkt.get_multi(keys)
             self.log.info("Able to fetch keys starting from {0} to {1}".format(keys[0], keys[len(keys)-1]))
         except Exception as e:
             self.log.error(e)
             self.log.info("Now trying keys in the batch one at a time...")
             key = ''
             try:
                 # NOTE(review): this stops at the FIRST missing key of the
                 # batch; later missing keys in the same batch go unreported.
                 for key in keys:
                     bkt.get(key)
             except NotFoundError:
                 vBucketId = VBucketAware._get_vBucket_id(key)
                 errors.append("Missing key: {0}, VBucketId: {1}".
                               format(key, vBucketId))
         batch_start += batch_size
     return errors
 def test_function_where_handler_code_takes_more_time_to_execute_than_execution_timeout(self):
     """Handler sleeps longer than its execution_timeout; every invocation must time out."""
     doc_ids = ['customer123', 'customer1234', 'customer12345']
     conn_str = 'couchbase://{ip}/{name}'.format(ip=self.master.ip, name=self.src_bucket_name)
     bucket = Bucket(conn_str, username="******", password="******")
     for doc_id in doc_ids:
         bucket.upsert(doc_id, {'name' : doc_id})
     # The handler sleeps ~5s while execution_timeout is 1s, so each run is killed.
     body = self.create_save_function_body(self.function_name, HANDLER_CODE_ERROR.EXECUTION_TIME_MORE_THAN_TIMEOUT,
                                           execution_timeout=1)
     self.deploy_function(body)
     # Wait long enough for the framework to attempt (and time out on) the mutations.
     self.sleep(60)
     # Nothing should have reached dst_bucket.
     self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
     eventing_nodes = self.get_nodes_from_services_map(service_type="eventing", get_all_nodes=True)
     # Aggregate timeout_count over every eventing node.
     exec_timeout_count = sum(
         RestConnection(node).get_all_eventing_stats()[0]["failure_stats"]["timeout_count"]
         for node in eventing_nodes)
     # Every single document's handler execution should have timed out.
     if exec_timeout_count != len(doc_ids):
         self.fail("Not all event executions timed out : Expected : {0} Actual : {1}".format(len(doc_ids),
                                                                                             exec_timeout_count))
     self.undeploy_and_delete_function(body)
 def _update_document(self, key, document):
     """Upsert *document* under *key*, authenticating only for post-4.x targets."""
     conn_str = 'couchbase://{ip}/default'.format(ip=self.master.ip)
     # 4.x servers pre-date RBAC, so no credentials are passed for them.
     if self.upgrade_to.startswith("4"):
         bucket = Bucket(conn_str)
     else:
         bucket = Bucket(conn_str, username="******", password="******")
     bucket.upsert(key, document)
    def test_add_concurrent(self):
        """Many threads append to the same subdocument array; no append may be lost."""
        DOCID = 'subdoc_doc_id'
        CONNSTR = 'couchbase://' + self.servers[0].ip + ':11210'
        ITERATIONS = 200
        THREADS = 20

        main_bucket = SDK_Bucket(CONNSTR)
        main_bucket.upsert(DOCID, {'recs': []})

        class Appender(Thread):
            # Each worker opens its own connection and appends ITERATIONS entries.
            def run(self, *args, **kw):
                conn = SDK_Bucket(CONNSTR)
                for _ in range(ITERATIONS):
                    conn.mutate_in(DOCID, SD.array_append('recs', 1))

        workers = [Appender() for _ in range(THREADS)]
        for w in workers:
            w.start()
        for w in workers:
            w.join()

        obj = main_bucket.get(DOCID)
        array_entry_count = len(obj.value['recs'])

        # Every append from every thread must be present.
        self.assertTrue(array_entry_count == ITERATIONS * THREADS,
                        'Incorrect number of array entries. Expected {0} actual {1}'.format(ITERATIONS * THREADS,
                                                                          array_entry_count))
Beispiel #9
0
def confirm_ir(request, ir_id, group_id, user_id):
    """Accept or decline an invite request and persist the decision on the IR doc."""
    print("IR CONFIRMATION")
    conn = Bucket('couchbase://localhost/nihongo')
    password = conn.get(user_id).value['password']
    ir_doc = conn.get(ir_id).value

    if 'accept' in request.POST:
        print("IR ACCEPTED")
        ir_doc['confirmed'] = 'accepted'
        # On acceptance, grant the user the group channel via Sync Gateway.
        sync_user = SyncGateway.get_user(user_id)
        new_sync_user = {}
        admin_channels = sync_user['admin_channels']
        #all_channels = sync_user['all_channels']
        admin_channels.append(group_id)
        SyncGateway.put_user(sync_user['name'], '*****@*****.**', password, admin_channels)
        print(sync_user)
    elif 'decline' in request.POST:
        ir_doc['confirmed'] = 'declined'
        print("IR DECLINED")

    conn.upsert(ir_id, ir_doc)
    return HttpResponseRedirect(reverse('tutor:tutor_groups'))
Beispiel #10
0
 def get(self,device_id):
     bucket = Bucket('couchbase://46.101.11.33:8091/devices')
     res = bucket.get(device_id, quiet=True)
     if res.success:
         return res.value
     else:
         return {"errCode": "-1", "errMsg": "Could not find device %s" % device_id}
Beispiel #11
0
def register(request):
    """Render the registration form; on valid POST create and log in the user,
    mirror the account into Couchbase, and register it with Sync Gateway."""
    args = {}
    args.update(csrf(request))
    args['form'] = UserCreationForm()
    if request.POST:
        newuser_form = UserCreationForm(request.POST)
        if not newuser_form.is_valid():
            # Re-render with the bound form so validation errors are shown.
            args['form'] = newuser_form
        else:
            newuser_form.save()
            newuser = auth.authenticate(username=newuser_form.cleaned_data['username'], password=newuser_form.cleaned_data['password2'])
            auth.login(request, newuser)
            # Mirror the account into the main Couchbase bucket.
            try:
                conn = Bucket('couchbase://localhost/nihongo')
                username = newuser_form.cleaned_data['username']
                password = newuser_form.cleaned_data['password2']
                c_username = '******' + username
                user_doc = {'username' : username, 'password' : password,  'doc_type' : 'user_doc'}
                user_doc['doc_channels'] = [c_username]
                conn.upsert(c_username, user_doc)
            except CouchbaseError as ce:
                raise Http404("Couchbase server error")
            # Register the user with the Sync Gateway database as well.
            SyncGateway.put_user(c_username, '*****@*****.**', password, [c_username])
            return redirect('/')
    return render_to_response('register.html', args)
Beispiel #12
0
    def get_phrases(self, cb_url, output_file, input_file, docs_total):
        """Sample random documents and collect two-word phrases whose first
        word appears in the term list from *input_file*.

        The term list is the first token of each line after shuffling and
        cutting to 10**6 lines.  Sampled documents' "text" fields are
        scanned for `term term_next` pairs; collection stops after
        self.limit phrases, which are written one per line to *output_file*.
        """
        cb = Bucket("couchbase://{}/{}?operation_timeout=10".format(cb_url, "bucket-1"), password="******")
        lines = self._shuffle_and_cut(input_file, 10 ** 6)
        # Keep only the first whitespace-separated token of each line.
        formatted_lines = list()
        for line in lines:
            formatted_lines.append(line.split()[0])
        lines = formatted_lines
        results = set()
        # docid is only a sample counter; the key is drawn at random each pass.
        for docid in range(1, docs_total - 1):
            key = hex(random.randint(1, docs_total))[2:]
            try:
                txt = cb.get(key).value
                if txt["text"]:
                    # NOTE(review): on Python 3 encode() yields bytes, so the
                    # split(' ') below would raise TypeError -- presumably
                    # this ran on Python 2; confirm before porting.
                    txt = txt["text"].encode('ascii', 'ignore')
                    terms = txt.split(' ')
                    for idx, term in enumerate(terms):
                        if term in lines:
                            if len(terms) > idx + 1:
                                term_next = terms[idx + 1]
                                if str.isalpha(term_next):
                                    result_phrase = "{} {}".format(term, term_next)
                                    results.add(result_phrase)
            except Exception as e:
                # Missing keys and decode failures are tolerated per sample.
                print(("{}: {}: {}".format(key, len(results), str(e))))

            if len(results) > self.limit:
                break

        output_file = open(output_file, "w")
        for phrase in results:
            print(phrase, file=output_file)
 def test_reject_ephemeral_attempt(self):
     """View queries against an ephemeral bucket must raise NotSupportedError.

     Provisions a throw-away ephemeral bucket plus a views-role user, then
     asserts that a streaming view query is rejected with "Ephemeral".
     Skipped when no real server or no ephemeral support is available.
     """
     if not self._realserver_info:
         raise SkipTest("Need real server")
     admin=self.make_admin_connection()
     bucket_name = 'ephemeral'
     # (userid, (password, roles)) triples; only the "viewer" entry is used.
     users=[('writer',('s3cr3t',[('data_reader', 'ephemeral'), ('data_writer', 'ephemeral')])),
            ('reader',('s3cr3t',[('data_reader', 'ephemeral')])),
            ('viewer',('s3cr3t',[('views_reader', 'ephemeral'), ('views_admin', 'ephemeral')]))]
     user=users[2]
     (userid, password, roles) = user[0],user[1][0],user[1][1]
     # add user
     try:
         # Best-effort cleanup of a leftover bucket from a previous run.
         admin.bucket_delete(bucket_name)
     except:
         pass
     try:
         admin.bucket_create(name=bucket_name,
                                  bucket_type='ephemeral',
                                  ram_quota=100)
     except HTTPError:
         raise SkipTest("Unable to provision ephemeral bucket")
     try:
         admin.user_upsert(AuthDomain.Local, userid, password, roles)
         admin.wait_ready(bucket_name, timeout=10)
         conn_str = "couchbase://{0}/{1}".format(self.cluster_info.host, bucket_name)
         bucket = Bucket(connection_string=conn_str,username=userid,password=password)
         self.assertIsNotNone(bucket)
         self.assertRaisesRegex(NotSupportedError, "Ephemeral", lambda: bucket.query("beer", "brewery_beers", streaming=True, limit=100))
     finally:
         # Always tear down the provisioned bucket and user.
         admin.bucket_delete(bucket_name)
         admin.user_remove(AuthDomain.Local, userid)
Beispiel #14
0
 def createids():
     """Print up to 10000 document ids from bucket-1 via a N1QL meta() query.

     Intended as a helper for building a docid set from the output.
     """
     cb = Bucket('couchbase://172.23.123.38/bucket-1', password='******')
     query = N1QLQuery('select meta().id from `bucket-1` limit 10000')
     for row in cb.n1ql_query(query):
         print(row["id"], None)
 def _get_documets(self, bucket_name, field):
     """Yield (doc_id, document) pairs for every row of *bucket_name* via N1QL."""
     bucket = Bucket('couchbase://{ip}/{name}'.format(ip=self.master.ip, name=bucket_name))
     if not bucket:
         log.info("Bucket connection is not established.")
     log.info("Updating {0} in all documents in bucket {1}...".format(field, bucket_name))
     # SELECT * nests each document under the bucket-name key of the row.
     for row in bucket.n1ql_query("SELECT * FROM {0}".format(bucket_name)):
         doc_id = row[bucket.bucket]['_id']
         yield doc_id, bucket.get(key=doc_id).value
 def createMapView(self, viewName, mapFunction, reduceFunction):
     """Add a map (and optional reduce) view, creating the design document if missing."""
     bucket = Bucket(self._bucketUrl)
     try:
         # Extend the existing design document when one is already published.
         existing = bucket.design_get(self.designDocumentName, False)
         self._createView(existing.value, viewName, mapFunction, reduceFunction)
     except HTTPError:
         # No design document yet -- start from an empty one.
         self._createView({"views": {}}, viewName, mapFunction, reduceFunction)
Beispiel #17
0
def get_word_type(type):
    """Print every row of `default` whose type matches *type*.

    Requires a GSI primary index on the bucket (see below).  Fix: the two
    `print` statements used Python 2 statement syntax (SyntaxError on
    Python 3); the call form works on both.
    """
    cb = Bucket(COUCHBASE)
    # fails if you do not create an index on type
    #   $ /Applications/Couchbase\ Server.app/Contents/Resources/couchbase-core/bin/cbq
    #   cbq> CREATE PRIMARY INDEX ON default USING GSI;
    query = N1QLQuery("SELECT cnt, word, type FROM `default` WHERE type=$q_type", q_type=type)
    print(query)
    for row in cb.n1ql_query(query):
        print(row)
Beispiel #18
0
class Connection(object):
    """Thin DB-API-style wrapper that binds a Couchbase bucket connection."""

    def __init__(self, connection_string):
        # Bucket handle shared by all cursors created from this connection.
        self.bucket = Bucket(connection_string)

    def cursor(self):
        """Return a new Cursor bound to this connection."""
        return Cursor(self)

    def close(self):
        """Release the underlying bucket connection."""
        self.bucket._close()
Beispiel #19
0
def request_invite(request, user_id, group_id):
    """Store a pending invite-request document and redirect to the groups page."""
    default = 'request for invite from' + user_id
    request_text = request.POST.get('request_text', default)
    conn = Bucket('couchbase://localhost/nihongo')
    doc = {
        'doc_type': 'invite_request',
        'user_id': user_id,
        'group_id': group_id,
        'request_text': request_text,
        'confirmed': "pending",
    }
    # Key pattern: ireq_<uuid with dashes replaced by underscores>.
    doc_key = 'ireq_' + str(uuid4()).replace('-', '_')
    conn.upsert(doc_key, doc)
    return HttpResponseRedirect(reverse('tutor:tutor_groups'))
Beispiel #20
0
 def get(self, info):
     """Parse a colon-delimited update spec, upsert the device doc, and return it.

     *info* is "device_id:temp:type:status".  Unknown devices yield an
     error dict instead.
     """
     device_id, temp, dev_type, status = info.split(':')
     bucket = Bucket('couchbase://46.101.11.33:8091/devices')
     if not bucket.get(device_id, quiet=True).success:
         return {"errCode": "-1", "errMsg": "Could not find device %s" % device_id}
     # NOTE(review): the statement is built by string interpolation from
     # request data -- consider N1QL placeholders if these values can be
     # attacker-controlled.
     bucket.n1ql_query('UPSERT INTO devices (KEY,VALUE) VALUES ("%s",{"device_id":"%s", "temp":"%s", "type":"%s", "status":"%s"})' % (device_id, device_id, temp, dev_type, status)).execute()
     return bucket.get(device_id, quiet=True).value
Beispiel #21
0
def query_sports(params):
    """Return all `sports` rows matching every key/value pair in *params*."""
    cb = Bucket(COUCHBASE)
    # Build the WHERE clause from the filter dict.
    # NOTE(review): values are interpolated directly into the statement;
    # use N1QL placeholders if *params* can contain untrusted input.
    where = ['%s="%s"' % (key, params[key]) for key in params.keys()]
    query = N1QLQuery('SELECT * FROM `sports` WHERE ' + ' AND '.join(where))
    return [row for row in cb.n1ql_query(query)]
 def _get_documets(self, bucket_name, field):
     """Yield (key, document) for every "array_dataset-i-j" doc with
     i, j in range(self.docs_per_day)."""
     bucket = Bucket('couchbase://{ip}/{name}'.format(ip=self.master.ip, name=bucket_name))
     if not bucket:
         log.info("Bucket connection is not established.")
     log.info("Updating {0} in all documents in bucket {1}...".format(field, bucket_name))
     for day in range(self.docs_per_day):
         for slot in range(self.docs_per_day):
             doc_key = "array_dataset-{0}-{1}".format(day, slot)
             yield doc_key, bucket.get(key=doc_key).value
def usernameIsValue(username):
    """Return True when *username* exists in the default bucket.

    On a missing key (or any lookup failure) a notification is sent via
    send_simple_message and the function falls through, returning None.

    Fix: the two `print` statements used Python 2 statement syntax
    (SyntaxError on Python 3); the call form works on both.
    """
    print(username)
    try:
        bucket = Bucket("couchbase://localhost/default")
        rv = bucket.get(username)
        if rv is not None:
            return True

    except Exception as e:
        print("not found")
        send_simple_message(username)
Beispiel #24
0
class CBGen(CBAsyncGen):
    """Synchronous Couchbase workload client built on CBAsyncGen's operations.

    The constructor assembles the connection string (optionally switching to
    TLS for data traffic) and opens the bucket; the KV/view/N1QL operations
    simply delegate to the parent, wrapped in the quiet/backoff/timeit
    decorators used by the benchmark harness.
    """

    # Client-side operation timeout applied to the bucket handle (seconds).
    TIMEOUT = 10  # seconds

    def __init__(self, ssl_mode: str = 'none', n1ql_timeout: int = None, **kwargs):
        """Connect to kwargs['host']/kwargs['bucket'] using kwargs['password']
        and extra connection-string params from kwargs['connstr_params']."""

        connection_string = 'couchbase://{host}/{bucket}?password={password}&{params}'
        connstr_params = parse.urlencode(kwargs["connstr_params"])

        # 'data' ssl_mode switches the scheme to couchbases:// and pins the CA cert.
        if ssl_mode == 'data':
            connection_string = connection_string.replace('couchbase',
                                                          'couchbases')
            connection_string += '&certpath=root.pem'

        connection_string = connection_string.format(host=kwargs['host'],
                                                     bucket=kwargs['bucket'],
                                                     password=kwargs['password'],
                                                     params=connstr_params)

        self.client = Bucket(connection_string=connection_string)
        self.client.timeout = self.TIMEOUT
        if n1ql_timeout:
            self.client.n1ql_timeout = n1ql_timeout
        logger.info("Connection string: {}".format(connection_string))

    @quiet
    @backoff
    def create(self, *args, **kwargs):
        """Insert a document (retried via @backoff, errors silenced by @quiet)."""
        super().create(*args, **kwargs)

    @quiet
    @backoff
    @timeit
    def read(self, *args, **kwargs):
        """Fetch a document; latency is recorded by @timeit."""
        super().read(*args, **kwargs)

    @quiet
    @backoff
    @timeit
    def update(self, *args, **kwargs):
        """Mutate a document; latency is recorded by @timeit."""
        super().update(*args, **kwargs)

    @quiet
    def delete(self, *args, **kwargs):
        """Remove a document (errors silenced by @quiet, no retry)."""
        super().delete(*args, **kwargs)

    @timeit
    def view_query(self, ddoc: str, view: str, query: ViewQuery):
        """Run a view query and drain the result iterator (timed)."""
        tuple(self.client.query(ddoc, view, query=query))

    @quiet
    @timeit
    def n1ql_query(self, query: N1QLQuery):
        """Run a N1QL query and drain the result iterator (timed)."""
        tuple(self.client.n1ql_query(query))
Beispiel #25
0
def update_profile(request):
    """Upsert the user profile from the GET params, keyed by 'uid'.

    Returns 200 on success, 400 when 'uid' is absent, and 500 on any other
    failure.  Fix: `except Exception, e` is Python 2-only syntax (a
    SyntaxError on Python 3); `except Exception as e` works on 2.6+ and 3.
    """
    try:
        user_dict = request.GET
        users_bucket = Bucket('couchbase://localhost/users_us')
        if 'uid' in user_dict:
            users_bucket.upsert(request.GET.get('uid'), user_dict)
            return HttpResponse("successfully updated user profile")
        else:
            return HttpResponseBadRequest('uid not found')

    except Exception as e:
        return HttpResponseServerError("internal server error. Error : {e}".format(e=str(e)))
Beispiel #26
0
 def getByView(self, parameter):
     """Return the documents whose view key equals *parameter* (fresh, stale=false)."""
     bucket = Bucket(self._bucketUrl)
     options = Query()
     options.mapkey_range = (str(parameter), str(parameter))
     options.stale = False
     rows = bucket.query(self.designDocument, self._viewName, query=options)
     # Each row is [key, value, docid, doc]; index 2 is the document id.
     docids = [r[2] for r in rows]
     if not docids:
         return []
     return [hit.value for hit in bucket.get_multi(docids).values()]
Beispiel #27
0
    def post(self, request, group_id):
        """Create a new deck from the POSTed description and attach it to the group.

        Fix: the original re-fetched the group and rebuilt a decks_list after
        the try/except, but that result was never used (dead code) -- removed.
        """
        c = Bucket('couchbase://localhost/nihongo')
        success = 'dunno'
        constgroup = group_id.rsplit('_', 1)[0]
        print(constgroup)
        print('adding new deck')
        try:
            description = request.POST['description']
            print(description)
            # Key pattern: deck_<uuid with dashes replaced by underscores>.
            ckey = 'deck_' + str(uuid4()).replace('-', '_')

            newdeck = {'doc_type' : 'deck', 'description' : description, 'deck_name' : description}
            newdeck['cards_list'] = []
            newdeck['doc_channels'] = [group_id]
            c.insert(ckey, newdeck)
            # Register the new deck on the group document.
            group = c.get(group_id).value
            print(group.get('decks_list'))
            group.get('decks_list').append(ckey)
            c.upsert(group_id, group)
            success = 'success'
        except (BaseException, CouchbaseError) as e:
            success = 'error'
            print(e)

        return HttpResponseRedirect(reverse('tutor:group_decks', kwargs={'group_id' : group_id}))
Beispiel #28
0
    def post(self, request, group_id, deck_id):
        """Delete a deck document and detach it from the group's decks_list.

        Fix: the original re-fetched the group and rebuilt a decks_list after
        the try/except, but that result was never used (dead code) -- removed.
        """
        c = Bucket('couchbase://localhost/nihongo')
        success = 'dunno'
        print('deleting deck')
        try:
            c.delete(deck_id)
            # Remove the deck reference from the group document.
            group = c.get(group_id).value
            print(group.get('decks_list'))
            group.get('decks_list').remove(deck_id)
            c.upsert(group_id, group)
            success = 'success'
        except (BaseException, CouchbaseError) as e:
            success = 'error'
            print(e)

        return HttpResponseRedirect(reverse('tutor:group_decks', kwargs={'group_id' : group_id}))
    def create_bucket(self, name, ramQuotaMB=1024):
        """
        1. Create CBS bucket via REST
        2. Create client connection and poll until bucket is available
           Catch all connection exception and break when KeyNotFound error is thrown
        3. Verify all server nodes are in a 'healthy' state before proceeding

        Followed the docs below that suggested this approach.
        http://docs.couchbase.com/admin/admin/REST/rest-bucket-create.html
        """

        log_info("Creating bucket {} with RAM {}".format(name, ramQuotaMB))

        data = {
            "name": name,
            "ramQuotaMB": str(ramQuotaMB),
            "authType": "sasl",
            "proxyPort": "11211",
            "bucketType": "couchbase",
            "flushEnabled": "1"
        }

        resp = self._session.post("{}/pools/default/buckets".format(self.url), data=data)
        log_r(resp)
        resp.raise_for_status()

        # Create client and retry until NotFoundError is thrown: a NotFound on
        # a random key means the bucket itself is up and serving requests.
        start = time.time()
        while True:

            if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
                raise Exception("TIMEOUT while trying to create server buckets.")
            try:
                bucket = Bucket("couchbase://{}/{}".format(self.host, name))
                bucket.get('foo')
            except ProtocolError:
                # Bucket not reachable yet -- keep polling.
                log_info("Client Connection failed: Retrying ...")
                time.sleep(1)
                continue
            except TemporaryFailError:
                # Server is up but still warming up -- keep polling.
                log_info("Failure from server: Retrying ...")
                time.sleep(1)
                continue
            except NotFoundError:
                log_info("Key not found error: Bucket is ready!")
                break

        self.wait_for_ready_state()

        return name
Beispiel #30
0
 def _upload_test_run_dailyp(self, test_run_dict):
     """Upsert a daily-performance test-run document into the perf_daily bucket.

     Returns True on success, False when the DB connection cannot be made.
     Fix: `e.message` does not exist on Python 3 exceptions (AttributeError
     inside the handler); format the exception itself instead -- identical
     text for single-argument exceptions.
     """
     try:
         bucket = Bucket('couchbase://{}/perf_daily'
                         .format(StatsSettings.CBMONITOR))
     except Exception as e:
         logger.info("Post to Dailyp, DB connection error: {}".format(e))
         return False
     # Composite key uniquely identifying this run.
     doc_id = "{}__{}__{}__{}__{}".format(test_run_dict['category'],
                                          test_run_dict['subcategory'],
                                          test_run_dict['test'],
                                          test_run_dict['build'],
                                          test_run_dict['datetime'])
     bucket.upsert(doc_id, test_run_dict)
     return True
 def test_eventing_processes_mutation_when_xattrs_is_updated(self):
     """Eventing must fire both for xattr-bearing mutations and for deletions."""
     doc_ids = ['customer123', 'customer1234', 'customer12345']
     url = 'couchbase://{ip}/{name}'.format(ip=self.master.ip,
                                            name=self.src_bucket_name)
     bucket = Bucket(url, username="******", password="******")
     for doc_id in doc_ids:
         bucket.upsert(doc_id, {})
     body = self.create_save_function_body(self.function_name,
                                           self.handler_code,
                                           dcp_stream_boundary="from_now")
     # deploy eventing function
     self.deploy_function(body)
     # Touch several xattrs plus a regular field on every document.
     for doc_id in doc_ids:
         bucket.mutate_in(doc_id, SD.upsert('my1', {'value': 1}, xattr=True))
         bucket.mutate_in(doc_id, SD.upsert('my2', {'value': 2}, xattr=True))
         bucket.mutate_in(doc_id, SD.upsert('fax', '775-867-5309'))
     self.verify_eventing_results(self.function_name, 3,
                                  skip_stats_validation=True)
     # Add fresh xattrs, remove them again, then delete the documents.
     for doc_id in doc_ids:
         bucket.mutate_in(doc_id, SD.upsert('my3', {'value': 3}, xattr=True))
         bucket.mutate_in(doc_id, SD.upsert('my4', {'value': 4}, xattr=True))
         bucket.mutate_in(doc_id, SD.remove('my3', xattr=True))
         bucket.mutate_in(doc_id, SD.remove('my4', xattr=True))
         bucket.remove(doc_id)
     self.verify_eventing_results(self.function_name, 0,
                                  skip_stats_validation=True)
     self.undeploy_and_delete_function(body)
 def test_fiid_crc_with_pause_resume(self):
     """_eventing.fiid/crc xattrs must match across all docs, even through pause/resume."""
     body = self.create_save_function_body(
         self.function_name,
         HANDLER_CODE.BUCKET_OP_SOURCE_DOC_MUTATION,
         dcp_stream_boundary="from_now")
     # deploy eventing function
     self.deploy_function(body)
     url = 'couchbase://{ip}/{name}'.format(ip=self.master.ip,
                                            name=self.src_bucket_name)
     bucket = Bucket(url, username="******", password="******")
     first_batch = ['customer123', 'customer1234', 'customer12345']
     for docid in first_batch:
         bucket.upsert(docid, {'a': 1})
     self.verify_eventing_results(self.function_name, 3,
                                  skip_stats_validation=True)
     # Reference fiid/crc values taken from the first document.
     fiid_value = bucket.lookup_in('customer123',
                                   SD.exists('_eventing.fiid',
                                             xattr=True))['_eventing.fiid']
     crc_value = bucket.lookup_in('customer123',
                                  SD.exists('_eventing.crc',
                                            xattr=True))['_eventing.crc']
     self.log.info("Fiid: {} and CRC: {}".format(fiid_value, crc_value))

     def check_docs(docids):
         # Every listed doc must carry the same fiid/crc as the reference doc.
         for docid in docids:
             fiid = bucket.lookup_in(docid,
                                     SD.exists('_eventing.fiid', xattr=True))
             crc = bucket.lookup_in(docid, SD.exists('_eventing.crc',
                                                     xattr=True))
             if fiid_value != fiid['_eventing.fiid'] or crc_value != crc[
                     '_eventing.crc']:
                 self.fail("fiid {} or crc {} values are not same:".format(
                     fiid, crc))

     check_docs(['customer1234', 'customer12345'])
     self.pause_function(body)
     second_batch = ['customer12553', 'customer1253', 'customer12531']
     for docid in second_batch:
         bucket.upsert(docid, {'a': 1})
     self.resume_function(body)
     self.verify_eventing_results(self.function_name, 6,
                                  skip_stats_validation=True)
     check_docs(second_batch + first_batch)
Beispiel #33
0
#!/usr/bin/env python
from __future__ import print_function

from couchbase.bucket import Bucket
import couchbase.exceptions as E

# Demo of the basic Couchbase KV operations: upsert, get, insert, replace.
cb = Bucket('couchbase://10.0.0.31/default')

# This always works!
print('Upserting')
cb.upsert('docid', {'property': 'value'})
print('Getting item back. Value is:', cb.get('docid').value)
print('...')

print(
    'Will try to insert the document. Should fail because the item already exists..'
)
try:
    cb.insert('docid', {'property': 'value'})
except E.KeyExistsError:
    # insert() requires the key to be absent, unlike upsert().
    print('Insert failed because item already exists!')
print('...')

print(
    'Replacing the document. This should work because the item already exists')
cb.replace('docid', {'property': 'new_value'})
print('Getting document again. Should contain the new contents:',
      cb.get('docid').value)
print('...')

print('Removing document.')
Beispiel #34
0
def purge(bucket, known_jobs):
    """Remove stale job documents for every build recorded in *bucket*.

    For each build, jobs whose Jenkins URL is unknown and returns 404 are
    deleted; when the same (os, component, name) appears more than once the
    older build_id's document is purged so only the newest is kept.
    Build jobs (UNIT, BUILD_SANITY) and test_suite_executor jobs are never
    purged.  Python 2 code (print statements).
    """
    client = Bucket(HOST + '/' + bucket)
    builds_query = "select distinct `build` from {0} where `build` is not null order by `build`".format(
        bucket)
    for row in client.n1ql_query(N1QLQuery(builds_query)):
        build = row['build']
        if not build:
            continue
        jobs_by_build_query = "SELECT meta().id,name,os,component,url,totalCount,build_id from {0} " \
                              "where `build` = '{1}'".format(bucket, build)
        # all jobs
        # JOBS maps os -> component -> list of (name, build_id, doc_id)
        # tuples, tracking the newest doc seen per job name.
        JOBS = {}
        for job in client.n1ql_query(N1QLQuery(jobs_by_build_query)):
            _id = job['id']
            name = job['name']
            # NOTE(review): `os` shadows the stdlib module name inside this loop.
            os = job['os']
            comp = job['component']
            url = job['url']
            count = job['totalCount']
            bid = job['build_id']
            isExecutor = False
            url_noauth = None
            if url.find("@") > -1:  # url has auth, clean
                url_noauth = "http://" + url.split("@")[1]

            if url.find("test_suite_executor") > -1:
                isExecutor = True

            if comp in ["UNIT", "BUILD_SANITY"]:
                continue  # don't purge build jobs

            # if job is unknown try to manually get url
            url_find = url_noauth or url
            if url_find not in known_jobs and not isExecutor:

                r = getReq(url)
                if r is None:
                    continue
                if r.status_code == 404:
                    # Job no longer exists on Jenkins: purge (best-effort).
                    try:
                        purge_job_details(_id, bucket)
                        client.remove(_id)
                        print "****MISSING*** %s_%s: %s:%s:%s (%s,%s)" % (
                            build, _id, os, comp, name, count, bid)
                    except:
                        pass
                    continue

            if os in JOBS:
                if comp in JOBS[os]:
                    # Look for an already-tracked job with the same name.
                    match = [(i, n) for i, n in enumerate(JOBS[os][comp])
                             if n[0] == name]
                    if len(match) > 0:
                        idx = match[0][0]
                        oldBid = match[0][1][1]
                        oldDocId = match[0][1][2]
                        if oldBid > bid:
                            # purge this docId because it is less this saved bid
                            try:
                                purge_job_details(_id, bucket, olderBuild=True)
                                client.remove(_id)
                                print "****PURGE-KEEP*** %s_%s: %s:%s:%s (%s,%s < %s)" % (
                                    build, _id, os, comp, name, count, bid,
                                    oldBid)
                            except:
                                pass
                        else:
                            # bid must exist in prior to purge replace

                            r = getReq(url + "/" + str(bid))
                            if r is None:
                                continue
                            if r.status_code == 404:
                                # delete this newer bid as it no longer exists
                                try:
                                    client.remove(_id)
                                except:
                                    pass
                            else:
                                # purge old docId
                                try:
                                    purge_job_details(oldDocId,
                                                      bucket,
                                                      olderBuild=True)
                                    client.remove(oldDocId)
                                    # use this bid as new tracker
                                    JOBS[os][comp][idx] = (name, bid, _id)
                                    print "****PURGE-REPLACE*** %s_%s: %s:%s:%s (%s,%s > %s)" % (
                                        build, _id, os, comp, name, count, bid,
                                        oldBid)
                                except:
                                    pass
                        continue
                    else:
                        # append to current comp
                        JOBS[os][comp].append((name, bid, _id))
                else:
                    # new comp
                    JOBS[os][comp] = [(name, bid, _id)]
            else:
                # new os
                JOBS[os] = {}
                JOBS[os][comp] = [(name, bid, _id)]
Beispiel #35
0
def run(s, b, p, i, f):
    filename = "{}/{}".format(defaultmetapath, f)
    with open(filename) as data_file:
        dataset = json.load(data_file)

    if p is not None:
        cb = Bucket("couchbase://{}/{}?operation_timeout=10".format(s, b),
                    password=p)
    else:
        cb = Bucket("couchbase://{}/{}?operation_timeout=10".format(s, b))

    ids = []
    for int_id in range(1, _items - 1):
        ids.append("{}_ycsb".format(int_id))

    random.shuffle(ids)
    counter = 0
    print "Bulding read dataset:"
    for docid in ids:
        counter += 1
        if counter % 100 == 0:
            p, fs = filling_progress(dataset)
            # print "parsed {} values. Dataset is full on {}%".format(counter, p)
            progress_par = "["
            for ip in range(0, 200):
                if ip > p * 2:
                    progress_par += " "
                else:
                    progress_par += "#"
            progress_par += "]"
            print progress_par

        document = cb.get(docid).value

        for dataset_field in dataset.keys():
            for document_field in document.keys():
                if "[]{}" in dataset_field:
                    arr_name, sub_field_name = dataset_field.split('[]{}')
                    if document_field == arr_name:
                        arr = document[arr_name]
                        arr_size = len(arr)
                        if arr_size > 0:
                            if arr_size == 1:
                                value = arr[0][sub_field_name]
                            else:
                                random_item = random.randint(0, arr_size - 1)
                                value = arr[random_item][sub_field_name]
                            post_to_dataset(dataset, dataset_field, value)

                elif "[]" in dataset_field:
                    arr_name = dataset_field.split('[]')[0]
                    if document_field == arr_name:
                        arr = document[arr_name]
                        arr_size = len(arr)
                        if arr_size > 0:
                            if arr_size == 1:
                                value = arr[0]
                            else:
                                random_item = random.randint(0, arr_size - 1)
                                value = arr[random_item]
                                post_to_dataset(dataset, dataset_field, value)

                elif "{}" in dataset_field:
                    obj_name, obj_field_name = dataset_field.split('{}')
                    if obj_name == document_field:
                        value = document[document_field][obj_field_name]
                        post_to_dataset(dataset, dataset_field, value)

                else:
                    if document_field == dataset_field:
                        value = document[document_field]
                        post_to_dataset(dataset, dataset_field, value)

        full, _ = is_full(dataset)
        totally_full = False
        if full:
            print "Read dataset is completed, building update dataset"
            random.shuffle(ids)
            max_keys = dataset["_id"]["max_docs_to_update"]
            counter = 0
            for id in ids:
                dataset["_id"]["docs_to_update"].append(id)
                counter += 1
                if counter > max_keys:
                    totally_full = True
                    break

            if totally_full:
                print "Updated dataset is complerted, dumping to a file"
                outf = f.replace("meta.", "dataset.")
                filenameout = "{}/{}".format(defaultdatapath, outf)
                with open(filenameout, 'w') as fp:
                    fp.write(
                        json.dumps(dataset,
                                   sort_keys=True,
                                   indent=4,
                                   separators=(',', ': ')))
                fp.close()
                sys.exit()
            else:
                print "Not enough unique values to satisfy the update docs requirement"

    full, fields = is_full(dataset, with_fields=True)
    if not full:
        print "Not enough unique values to satisfy the dataset requirements. Fields not full are: \n {}".format(
            fields)
 def test_eventing_processes_mutations_when_mutated_through_subdoc_api_and_set_expiry_through_sdk(
         self):
     """Check that sub-document mutations and document expiries reach eventing.

     Inserts three docs, deploys the handler with dcp_stream_boundary
     "from_now", performs three sub-doc mutations (expects 3 results),
     then gives every doc a 5s TTL and expects the result count to drop
     back to 0 once the expiry mutations are processed.
     """
     # set expiry pager interval to 1s so expired docs are purged quickly
     ClusterOperationHelper.flushctl_set(self.master,
                                         "exp_pager_stime",
                                         1,
                                         bucket=self.src_bucket_name)
     url = 'couchbase://{ip}/{name}'.format(ip=self.master.ip,
                                            name=self.src_bucket_name)
     bucket = Bucket(url, username="******", password="******")
     # seed three plain documents to mutate through the sub-doc API below
     for docid in ['customer123', 'customer1234', 'customer12345']:
         bucket.insert(docid, {'some': 'value'})
     body = self.create_save_function_body(self.function_name,
                                           self.handler_code,
                                           dcp_stream_boundary="from_now")
     # deploy eventing function
     self.deploy_function(body)
     # upserting a new sub-document
     bucket.mutate_in('customer123', SD.upsert('fax', '775-867-5309'))
     # inserting a sub-document
     bucket.mutate_in(
         'customer1234',
         SD.insert('purchases.complete', [42, True, None],
                   create_parents=True))
     # Creating and populating an array document
     bucket.mutate_in(
         'customer12345',
         SD.array_append('purchases.complete', ['Hello'],
                         create_parents=True))
     # three sub-doc mutations -> three expected eventing results
     self.verify_eventing_results(self.function_name,
                                  3,
                                  skip_stats_validation=True)
     for docid in ['customer123', 'customer1234', 'customer12345']:
         # set expiry on all the docs created using sub doc API
         bucket.touch(docid, ttl=5)
     self.sleep(10, "wait for expiry of the documents")
     # Wait for eventing to catch up with all the expiry mutations and verify results
     self.verify_eventing_results(self.function_name,
                                  0,
                                  skip_stats_validation=True)
     self.undeploy_and_delete_function(body)
Beispiel #37
0
from couchbase.bucket import Bucket
from couchbase.exceptions import NotFoundError
from couchbase.exceptions import CouchbaseTransientError
from couchbase.n1ql import N1QLQuery

# Module-level connection to the bible-text bucket (opened at import time).
cb = Bucket('couchbase://localhost/bibbiacei2008')
class ID:
    """Identifier of a single verse, addressed by book, chapter and verse."""

    # class-level defaults; instances assign their own values per verse
    book = ""
    chapter = ""
    verse = ""

    def generateID(self):
        """Return the document key in the form 't:<book>_<chapter>_<verse>'."""
        parts = (self.book, self.chapter, self.verse)
        return "t:" + "_".join(str(part) for part in parts)

    def nextVerse(self):
        """Return a new ID for the following verse of the same chapter."""
        succ = ID()
        succ.book = self.book
        succ.chapter = self.chapter
        succ.verse = str(int(self.verse) + 1)
        return succ

    def prevVerse(self):
        """Return a new ID for the preceding verse, or None at verse 1."""
        if int(self.verse) <= 1:
            return None
        pred = ID()
        pred.book = self.book
        pred.chapter = self.chapter
        pred.verse = str(int(self.verse) - 1)
        return pred
Beispiel #38
0
def new_client(host, bucket, password, timeout):
    """Open a Bucket connection for *bucket* on *host* and set its timeout."""
    conn = 'couchbase://{}/{}?password={}'.format(host, bucket, password)
    cb = Bucket(connection_string=conn)
    cb.timeout = timeout
    return cb
Beispiel #39
0
# -*- coding:UTF-8 -*-
import json
import copy
import uuid
import os
import urlparse
import urllib2
import shutil
import boto3
from couchbase.bucket import Bucket
#mys3 = boto3.resource('s3')
#s3 = boto3.resource('s3', region_name = "cn-north-1")
# Source bucket handle; the connection is established at import time.
conn_src = Bucket('couchbase://47.94.135.179:8091/catalogue')  # uat couchbase
#global_src  = Bucket('couchbase://47.89.179.118:8091/catalogue') # global couchbase
#local_src  = Bucket('couchbase://localhost:8091/catalogue')


def getData(key):
    """Fetch *key* from the source bucket and return the stored value.

    Errors (e.g. a missing key) are deliberately not swallowed here: an
    earlier try/except that returned None was already commented out, so
    exceptions from the SDK propagate to the caller.
    """
    # cleaned up: removed the dead `data = None` pre-assignment and the
    # commented-out try/except block
    return conn_src.get(key).value


def insertData(key, data):
    """Store *data* under *key* via insert (fails rather than overwrite an existing key)."""
    conn_src.insert(key, data)

Beispiel #40
0
def connect_db():
    """Return a Bucket built from the module-level CONNSTR/PASSWORD settings."""
    return Bucket(CONNSTR, password=PASSWORD)
Beispiel #41
0
 def _update_document(self, bucket_name, key, document):
     """Upsert *document* under *key* into the named bucket on the master node."""
     connection = 'couchbase://{ip}/{name}'.format(ip=self.master.ip,
                                                   name=bucket_name)
     handle = Bucket(connection, username=bucket_name, password="******")
     handle.upsert(key, document)
Beispiel #42
0
 def __init__(self, bucket):
     """Connect to *bucket* on the host named by the CB_HOST env var."""
     from couchbase.bucket import Bucket
     host = os.environ.get('CB_HOST')
     self.bucket = Bucket('couchbase://{}/{}'.format(host, bucket))
     # generous operation timeout for slow test clusters
     self.bucket.timeout = 30
Beispiel #43
0
from couchbase.bucket import Bucket
from couchbase.n1ql import N1QLQuery
import json
import sys
import time
import telepot
from telepot.loop import MessageLoop

## initialisation

# Bucket holding the shopping lists, plus the telegram bot handle.
# NOTE(review): the bot token is hard-coded — consider moving it to config.
cb = Bucket('couchbase://localhost/Listes')
bot = telepot.Bot('410210312:AAHvgXTuRxKKCoH6gUiR0rIrxCikSuj25ac')

## communication avec couchbase


def create_produit(name, user, category="defaut", liste="courses"):
    """Store a new product document and bump the product counter."""
    # NOTE(review): get-then-counter is not atomic; concurrent writers could
    # collide on the same id — confirm the single-writer assumption.
    next_id = cb.get('product_count').value + 1
    doc = {
        'name': name,
        'category': category,
        'liste': liste,
        'user': user,
    }
    cb.upsert(str(next_id), doc)
    cb.counter('product_count')


def get_liste(liste):
    """Build a N1QL query for all documents whose ``liste`` field matches *liste*.

    NOTE(review): the body looks truncated — the query is built but never
    executed or returned; confirm against the full source.
    """
    query = N1QLQuery('SELECT * FROM `Listes` WHERE liste=$filtre',
                      filtre=liste)
# -*- coding: utf-8 -*-
import io
import os
import csv
from couchbase.bucket import Bucket
from couchbase.exceptions import KeyExistsError

bucket = Bucket('couchbase://localhost/servidorescomissionados')

# procura o nome da planilha de cadastro
cadastro_filename = [
    filename for filename in os.listdir('dados/servidores/')
    if os.path.isfile(os.path.join('dados/servidores/', filename))
    and filename.split('.')[-1] == 'csv' and 'Cadastro' in filename
][0]

print u'Processando {}...'.format(cadastro_filename)
with open(os.path.join('dados/servidores/', cadastro_filename)) as f:
    dados = f.read()
    # retira os bytes NUL (nao deveriam existir na planilha e dao erro)
    dados = dados.replace('\x00', '')
    planilha = csv.DictReader(io.BytesIO(dados), delimiter='\t')
    for linha in planilha:
        # definicao da chave
        # se o eleitor se filiou e se desfiliou mais de uma vez,
        # nao sera considerado (geralmente eh uma desfiliacao
        # a pedido e outra via judicial)
        chave = "-".join((
            linha['CPF'].decode('iso-8859-1'),
            linha['NOME'].decode('iso-8859-1'),
            linha['MATRICULA'].decode('iso-8859-1'),
Beispiel #45
0
def main():
    """Poll for free test servers and dispatch matching test suites to Jenkins.

    Flow: parse CLI options -> query the QE-Test-Suites bucket for suites
    matching run/component/subcomponent/OS/release -> loop: ask the server
    manager for available VMs (or dockers), reserve them, and fire one
    test_suite_executor Jenkins build per launchable suite until the
    testsToLaunch list is drained.
    """

    usage = '%prog -s suitefile -v version -o OS'
    parser = OptionParser(usage)
    parser.add_option('-v', '--version', dest='version')
    parser.add_option(
        '-r', '--run',
        dest='run')  # run is ambiguous but it means 12 hour or weekly
    parser.add_option('-o', '--os', dest='os')
    parser.add_option('-n',
                      '--noLaunch',
                      action="store_true",
                      dest='noLaunch',
                      default=False)
    parser.add_option('-c', '--component', dest='component', default=None)
    parser.add_option('-p', '--poolId', dest='poolId', default='12hour')
    parser.add_option('-a', '--addPoolId', dest='addPoolId', default=None)
    parser.add_option('-t',
                      '--test',
                      dest='test',
                      default=False,
                      action='store_true')  # use the test Jenkins
    parser.add_option('-s',
                      '--subcomponent',
                      dest='subcomponent',
                      default=None)
    parser.add_option('-e',
                      '--extraParameters',
                      dest='extraParameters',
                      default=None)
    parser.add_option('-y', '--serverType', dest='serverType',
                      default='VM')  # or could be Docker
    parser.add_option('-u', '--url', dest='url', default=None)
    parser.add_option('-j', '--jenkins', dest='jenkins', default=None)
    parser.add_option('-b', '--branch', dest='branch', default='master')
    parser.add_option('-f',
                      '--framework',
                      dest='framework',
                      default='testrunner')

    # dashboardReportedParameters is of the form param1=abc,param2=def
    parser.add_option('-d',
                      '--dashboardReportedParameters',
                      dest='dashboardReportedParameters',
                      default=None)

    options, args = parser.parse_args()

    print 'the run is', options.run
    print 'the  version is', options.version
    # e.g. "6.5.1" -> 6.5; used below against each suite's implementedIn gate
    releaseVersion = float('.'.join(options.version.split('.')[:2]))
    print 'release version is', releaseVersion

    print 'nolaunch', options.noLaunch
    print 'os', options.os
    #print 'url', options.url

    print 'url is', options.url

    print 'the reportedParameters are', options.dashboardReportedParameters

    # What do we do with any reported parameters?
    # 1. Append them to the extra (testrunner) parameters
    # 2. Append the right hand of the equals sign to the subcomponent to make a report descriptor

    if options.extraParameters is None:
        if options.dashboardReportedParameters is None:
            runTimeTestRunnerParameters = None
        else:
            runTimeTestRunnerParameters = options.dashboardReportedParameters
    else:
        runTimeTestRunnerParameters = options.extraParameters
        if options.dashboardReportedParameters is not None:
            runTimeTestRunnerParameters = options.extraParameters + ',' + options.dashboardReportedParameters

    #f = open(options.suiteFile)
    #data = f.readlines()

    testsToLaunch = []

    #for d in data:
    #  fields = d.split()
    #  testsToLaunch.append( {'descriptor':fields[0],'confFile':fields[1],'iniFile':fields[2],
    #                         'serverCount':int(fields[3]), 'timeLimit':int(fields[4]),
    #                          'parameters':fields[5]})

    # suite definitions live in the QE-Test-Suites couchbase bucket
    cb = Bucket('couchbase://' + TEST_SUITE_DB + '/QE-Test-Suites')

    # build the N1QL query: optionally narrowed by component and subcomponent
    if options.component is None or options.component == 'None':
        queryString = "select * from `QE-Test-Suites` where '" + options.run + "' in partOf order by component"
    else:
        if options.subcomponent is None or options.subcomponent == 'None':
            splitComponents = options.component.split(',')
            componentString = ''
            for i in range(len(splitComponents)):
                componentString = componentString + "'" + splitComponents[
                    i] + "'"
                if i < len(splitComponents) - 1:
                    componentString = componentString + ','

            queryString = "select * from `QE-Test-Suites` where \"{0}\" in partOf and component in [{1}] order by component;".format(
                options.run, componentString)

        else:
            # have a subcomponent, assume only 1 component

            splitSubcomponents = options.subcomponent.split(',')
            subcomponentString = ''
            for i in range(len(splitSubcomponents)):
                print 'subcomponentString is', subcomponentString
                subcomponentString = subcomponentString + "'" + splitSubcomponents[
                    i] + "'"
                if i < len(splitSubcomponents) - 1:
                    subcomponentString = subcomponentString + ','
            queryString = "select * from `QE-Test-Suites` where \"{0}\" in partOf and component in ['{1}'] and subcomponent in [{2}];".\
                format(options.run, options.component, subcomponentString)

    print 'the query is', queryString  #.format(options.run, componentString)
    query = N1QLQuery(queryString)
    results = cb.n1ql_query(queryString)

    # filter each suite row by OS and release, collect launchable suites
    for row in results:
        try:
            data = row['QE-Test-Suites']
            data['config'] = data['config'].rstrip(
            )  # trailing spaces causes problems opening the files
            print 'row', data

            # check any os specific
            if 'os' not in data or (data['os'] == options.os) or \
                (data['os'] == 'linux' and options.os in set(['centos','ubuntu']) ):

                # and also check for which release it is implemented in
                if 'implementedIn' not in data or releaseVersion >= float(
                        data['implementedIn']):
                    if 'jenkins' in data:
                        # then this is sort of a special case, launch the old style Jenkins job
                        # not implemented yet
                        print 'Old style Jenkins', data['jenkins']
                    else:
                        # optional per-suite settings with their defaults
                        if 'initNodes' in data:
                            initNodes = data['initNodes'].lower() == 'true'
                        else:
                            initNodes = True
                        if 'installParameters' in data:
                            installParameters = data['installParameters']
                        else:
                            installParameters = 'None'
                        if 'slave' in data:
                            slave = data['slave']
                        else:
                            slave = 'P0'
                        if 'owner' in data:
                            owner = data['owner']
                        else:
                            owner = 'QE'
                        if 'mailing_list' in data:
                            mailing_list = data['mailing_list']
                        else:
                            mailing_list = '*****@*****.**'

                        # if there's an additional pool, get the number
                        # of additional servers needed from the ini
                        addPoolServerCount = getNumberOfAddpoolServers(
                            data['config'], options.addPoolId)

                        testsToLaunch.append({
                            'component':
                            data['component'],
                            'subcomponent':
                            data['subcomponent'],
                            'confFile':
                            data['confFile'],
                            'iniFile':
                            data['config'],
                            'serverCount':
                            getNumberOfServers(data['config']),
                            'addPoolServerCount':
                            addPoolServerCount,
                            'timeLimit':
                            data['timeOut'],
                            'parameters':
                            data['parameters'],
                            'initNodes':
                            initNodes,
                            'installParameters':
                            installParameters,
                            'slave':
                            slave,
                            'owner':
                            owner,
                            'mailing_list':
                            mailing_list
                        })
                else:
                    print data['component'], data[
                        'subcomponent'], ' is not supported in this release'
            else:
                print 'OS does not apply to', data['component'], data[
                    'subcomponent']

        except Exception as e:
            print 'exception in querying tests, possible bad record'
            print traceback.format_exc()
            print data

    print 'tests to launch:'
    for i in testsToLaunch:
        print i['component'], i['subcomponent']
    print '\n\n'

    launchStringBase = 'http://qa.sc.couchbase.com/job/test_suite_executor'

    # optional add [-docker] [-Jenkins extension]
    if options.serverType.lower() == 'docker':
        launchStringBase = launchStringBase + '-docker'
    if options.test:
        launchStringBase = launchStringBase + '-test'
    if options.framework.lower() == "jython":
        launchStringBase = launchStringBase + '-jython'
    elif options.jenkins is not None:
        launchStringBase = launchStringBase + '-' + options.jenkins

    # this are VM/Docker dependent - or maybe not
    launchString = launchStringBase + '/buildWithParameters?token=test_dispatcher&' + \
                        'version_number={0}&confFile={1}&descriptor={2}&component={3}&subcomponent={4}&' + \
                         'iniFile={5}&parameters={6}&os={7}&initNodes={' \
                         '8}&installParameters={9}&branch={10}&slave={' \
                         '11}&owners={12}&mailing_list={13}'
    if options.url is not None:
        launchString = launchString + '&url=' + options.url

    summary = []

    # dispatch loop: repeat until every collected suite has been launched
    while len(testsToLaunch) > 0:
        try:
            # this bit is Docker/VM dependent
            getAvailUrl = 'http://' + SERVER_MANAGER + '/getavailablecount/'
            if options.serverType.lower() == 'docker':
                # may want to add OS at some point
                getAvailUrl = getAvailUrl + 'docker?os={0}&poolId={1}'.format(
                    options.os, options.poolId)
            else:
                getAvailUrl = getAvailUrl + '{0}?poolId={1}'.format(
                    options.os, options.poolId)

            response, content = httplib2.Http(timeout=60).request(
                getAvailUrl, 'GET')
            if response.status != 200:
                print time.asctime(time.localtime(
                    time.time())), 'invalid server response', content
                time.sleep(POLL_INTERVAL)
            elif int(content) == 0:
                print time.asctime(time.localtime(time.time())), 'no VMs'
                time.sleep(POLL_INTERVAL)
            else:
                #see if we can match a test
                serverCount = int(content)
                print time.asctime(time.localtime(time.time(
                ))), 'there are', serverCount, ' servers available'

                # find the first suite whose server needs fit the free pool
                haveTestToLaunch = False
                i = 0
                while not haveTestToLaunch and i < len(testsToLaunch):
                    if testsToLaunch[i]['serverCount'] <= serverCount:
                        if testsToLaunch[i]['addPoolServerCount']:
                            getAddPoolUrl = 'http://' + SERVER_MANAGER + '/getavailablecount/'
                            if options.serverType.lower() == 'docker':
                                # may want to add OS at some point
                                getAddPoolUrl = getAddPoolUrl + 'docker?os={0}&poolId={1}'.format(
                                    options.os, options.addPoolId)
                            else:
                                getAddPoolUrl = getAddPoolUrl + '{0}?poolId={1}'.format(
                                    options.os, options.addPoolId)

                            response, content = httplib2.Http(
                                timeout=60).request(getAddPoolUrl, 'GET')
                            if response.status != 200:
                                print time.asctime(time.localtime(time.time(
                                ))), 'invalid server response', content
                                time.sleep(POLL_INTERVAL)
                            elif int(content) == 0:
                                print time.asctime(
                                    time.localtime(time.time())),\
                                    'no {0} VMs at this time'.format(options.addPoolId)
                                i = i + 1
                            else:
                                print time.asctime( time.localtime(time.time()) ),\
                                    "there are {0} {1} servers available".format(int(content), options.addPoolId)
                                haveTestToLaunch = True
                        else:
                            haveTestToLaunch = True
                    else:
                        i = i + 1

                if haveTestToLaunch:
                    # build the dashboard descriptor
                    dashboardDescriptor = urllib.quote(
                        testsToLaunch[i]['subcomponent'])
                    if options.dashboardReportedParameters is not None:
                        for o in options.dashboardReportedParameters.split(
                                ','):
                            dashboardDescriptor += '_' + o.split('=')[1]

                    # and this is the Jenkins descriptor
                    descriptor = urllib.quote(
                        testsToLaunch[i]['component'] + '-' +
                        testsToLaunch[i]['subcomponent'] + '-' +
                        time.strftime('%b-%d-%X') + '-' + options.version)

                    # grab the server resources
                    # this bit is Docker/VM dependent
                    if options.serverType.lower() == 'docker':
                        getServerURL = 'http://' + SERVER_MANAGER + \
                               '/getdockers/{0}?count={1}&os={2}&poolId={3}'. \
                          format(descriptor, testsToLaunch[i]['serverCount'], \
                                 options.os, options.poolId)

                    else:
                        getServerURL = 'http://' + SERVER_MANAGER + \
                                '/getservers/{0}?count={1}&expiresin={2}&os={3}&poolId={4}'. \
                           format(descriptor, testsToLaunch[i]['serverCount'],testsToLaunch[i]['timeLimit'], \
                                  options.os, options.poolId)
                    print 'getServerURL', getServerURL

                    response, content = httplib2.Http(timeout=60).request(
                        getServerURL, 'GET')
                    print 'response.status', response, content

                    if options.serverType.lower() != 'docker':
                        # sometimes there could be a race, before a dispatcher process acquires vms,
                        # another waiting dispatcher process could grab them, resulting in lesser vms
                        # for the second dispatcher process
                        if len(json.loads(
                                content)) != testsToLaunch[i]['serverCount']:
                            continue

                    # get additional pool servers as needed
                    if testsToLaunch[i]['addPoolServerCount']:
                        if options.serverType.lower() == 'docker':
                            getServerURL = 'http://' + SERVER_MANAGER + \
                                   '/getdockers/{0}?count={1}&os={2}&poolId={3}'. \
                              format(descriptor,
                                     testsToLaunch[i]['addPoolServerCount'],
                                     options.os,
                                     options.addPoolId)

                        else:
                            getServerURL = 'http://' + SERVER_MANAGER + \
                                    '/getservers/{0}?count={1}&expiresin={2}&os={3}&poolId={4}'. \
                               format(descriptor,
                                      testsToLaunch[i]['addPoolServerCount'],
                                      testsToLaunch[i]['timeLimit'], \
                                      options.os,
                                      options.addPoolId)
                        print 'getServerURL', getServerURL

                        response2, content2 = httplib2.Http(
                            timeout=60).request(getServerURL, 'GET')
                        print 'response2.status', response2, content2



                    # 499 from the server manager means the reservation failed
                    if response.status == 499 or \
                            (testsToLaunch[i]['addPoolServerCount'] and
                            response2.status == 499):
                        time.sleep(POLL_INTERVAL
                                   )  # some error checking here at some point
                    else:
                        # and send the request to the test executor

                        # figure out the parameters, there are test suite specific, and added at dispatch time
                        if runTimeTestRunnerParameters is None:
                            parameters = testsToLaunch[i]['parameters']
                        else:
                            if testsToLaunch[i]['parameters'] == 'None':
                                parameters = runTimeTestRunnerParameters
                            else:
                                parameters = testsToLaunch[i][
                                    'parameters'] + ',' + runTimeTestRunnerParameters

                        url = launchString.format(
                            options.version, testsToLaunch[i]['confFile'],
                            descriptor, testsToLaunch[i]['component'],
                            dashboardDescriptor, testsToLaunch[i]['iniFile'],
                            urllib.quote(parameters), options.os,
                            testsToLaunch[i]['initNodes'],
                            testsToLaunch[i]['installParameters'],
                            options.branch, testsToLaunch[i]['slave'],
                            urllib.quote(testsToLaunch[i]['owner']),
                            urllib.quote(testsToLaunch[i]['mailing_list']))

                        if options.serverType.lower() != 'docker':
                            # strip the JSON list brackets/spaces to pass the
                            # reserved servers as a plain query parameter
                            r2 = json.loads(content)
                            servers = json.dumps(r2).replace(' ', '').replace(
                                '[', '', 1)
                            servers = rreplace(servers, ']', 1)
                            url = url + '&servers=' + urllib.quote(servers)

                            if testsToLaunch[i]['addPoolServerCount']:
                                addPoolServers = content2.replace(' ','')\
                                                   .replace('[','', 1)
                                addPoolServers = rreplace(
                                    addPoolServers, ']', 1)
                                url = url + '&addPoolServerId=' +\
                                      options.addPoolId +\
                                      '&addPoolServers=' +\
                                      urllib.quote(addPoolServers)

                        print '\n', time.asctime(time.localtime(
                            time.time())), 'launching ', url
                        print url

                        if options.noLaunch:
                            # free the VMs
                            time.sleep(3)
                            if options.serverType.lower() == 'docker':
                                pass  # figure docker out later
                            else:
                                response, content = httplib2.Http(timeout=60).\
                                    request('http://' + SERVER_MANAGER + '/releaseservers/' + descriptor + '/available', 'GET')
                                print 'the release response', response, content
                        else:
                            response, content = httplib2.Http(
                                timeout=60).request(url, 'GET')

                        testsToLaunch.pop(i)
                        summary.append({
                            'test':
                            descriptor,
                            'time':
                            time.asctime(time.localtime(time.time()))
                        })
                        if options.noLaunch:
                            pass  # no sleeping necessary
                        elif options.serverType.lower() == 'docker':
                            time.sleep(
                                240
                            )  # this is due to the docker port allocation race
                        else:
                            time.sleep(30)
                else:
                    print 'not enough servers at this time'
                    time.sleep(POLL_INTERVAL)
            #endif checking for servers

        except Exception as e:
            print 'have an exception'
            print traceback.format_exc()
            time.sleep(POLL_INTERVAL)
    #endwhile

    print '\n\n\ndone, everything is launched'
    for i in summary:
        print i['test'], 'was launched at', i['time']
    return
Beispiel #46
0
 def _create_test_bucket(self, host, bucket_name, password):
     """Open and return a connection to *bucket_name* on *host*."""
     url = 'couchbase://%s/%s' % (host, bucket_name)
     return Bucket(url, password=password)
 def __init__(self, connection_string):
     """Initialise the data source with the Couchbase *connection_string*.

     A factory callable is stored in ``self.create_client`` so connections
     can be (re)created lazily by the caller.
     """
     ThreadSafeDataSource.__init__(self)
     self.connection_string = connection_string
     # NOTE(review): the lambda's `connection_string` parameter shadows the
     # outer name and is ignored -- the factory always uses
     # self.connection_string. Confirm callers rely on that before changing.
     self.create_client = lambda connection_string: Bucket(self.connection_string, timeout=60)
Beispiel #48
0
SiteAddr = {}
# NOTE(review): the loop below populates SiteName (presumably defined earlier
# in the file), while the SiteAddr dict created above is never written here --
# confirm which mapping was intended.
for AirBox_SiteName in AirBox_SiteNames:
    # Each line is "<site id>\t<site name>"; the context manager guarantees
    # the file handle is closed (the original leaked it).
    with open(AirBox_SiteName, 'r') as SiteName_File:
        for line in SiteName_File:
            items = re.split('\t', line.rstrip('\r\n'))
            SiteName[items[0]] = items[1]

if (USE_MongoDB == 1):
    mongodb_client = pymongo.MongoClient(MongoDB_SERVER,
                                         MongoDB_PORT,
                                         serverSelectionTimeoutMS=0)
    mongodb_db = mongodb_client[MongoDB_DB]

if (USE_CouchbaseDB == 1):
    try:
        couchbase_db = Bucket(Couchbase_SERVER)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit still
        # propagate; any connection failure simply disables Couchbase writes.
        print(
            "[ERROR] Cannot connect to Couchbase server. Skip the following Couchbase insertions."
        )
        USE_CouchbaseDB = 0

# Matches plain integers and decimals, optionally negative.
num_re_pattern = re.compile(r"^-?\d+\.\d+$|^-?\d+$")

mqtt_client = mqtt.Client()
mqtt_client.on_connect = on_connect
mqtt_client.on_message = on_message

mqtt_client.connect(MQTT_SERVER, MQTT_PORT, MQTT_ALIVE)

# Blocking call that processes network traffic, dispatches callbacks and
 def test_eventing_where_dataset_has_different_key_types_using_sdk_and_n1ql(
         self):
     """Verify eventing handles documents whose keys use unusual formats.

     Documents are created both through the SDK and through a N1QL INSERT,
     the eventing function is deployed and the mutation count verified; the
     same documents are then deleted and the count verified to drop to 0.
     """
     keys = [
         "1324345656778878089435468780879760894354687808797613243456567788780894354687808797613243456567788780894354687808797613243456567788780894354687808797613287808943546878087976132434565677887808943546878087976132434565677887808943546878087976132943546878",
         # max key size
         "1",  # Numeric key, see MB-26706
         "a1",  # Alphanumeric
         "1a",  # Alphanumeric
         "1 a b",  # Alphanumeric with space
         "1.234",  # decimal
         "~`!@  #$%^&*()-_=+{}|[]\:\";\'<>?,./",  # all special characters
         "\xc2\xa1 \xc2\xa2 \xc2\xa4 \xc2\xa5",  # utf-8 encoded characters
         "true",  # boolean key
         "false",  # boolean key
         "True",  # boolean key
         "False",  # boolean key
         "null",  # null key
         "undefined",  # undefined key
         # Check here for javascript builtin objects : https://mzl.la/1zDsM8O
         "NaN",
         "Symbol()",
         # BUG FIX: a comma was missing after "Symbol(42)", so implicit string
         # concatenation merged it with "Symbol('foo')" into a single key.
         "Symbol(42)",
         "Symbol(\'foo\')",
         "isNaN",
         "Error",
         "Function",
         "Infinity",
         "Atomics",
         "Boolean",
         "ArrayBuffer",
         "DataView",
         "Date",
         "Generator {}",
         "InternalError",
         "Intl",
         "Number",
         "Math",
         "Map",
         "Promise",
         "Proxy"
     ]
     url = 'couchbase://{ip}/{name}'.format(ip=self.master.ip,
                                            name=self.src_bucket_name)
     bucket = Bucket(url, username="******", password="******")
     for key in keys:
         bucket.upsert(key, "Test with different key values")
     # create a doc using n1ql query
     query = "INSERT INTO  " + self.src_bucket_name + " ( KEY, VALUE ) VALUES ('key11111','from N1QL query')"
     self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
     body = self.create_save_function_body(self.function_name,
                                           self.handler_code)
     self.deploy_function(body)
     # Wait for eventing to catch up with all the update mutations and verify results
     self.verify_eventing_results(self.function_name,
                                  len(keys) + 1,
                                  skip_stats_validation=True)
     # delete all the documents with different key types
     for key in keys:
         bucket.remove(key)
     # delete a doc using n1ql query
     self.n1ql_helper.create_primary_index(using_gsi=True,
                                           server=self.n1ql_node)
     query = "DELETE FROM " + self.src_bucket_name + " where meta().id='key11111'"
     self.n1ql_helper.run_cbq_query(query=query, server=self.n1ql_node)
     # Wait for eventing to catch up with all the delete mutations and verify results
     self.verify_eventing_results(self.function_name,
                                  0,
                                  skip_stats_validation=True)
     self.undeploy_and_delete_function(body)
     self.n1ql_helper.drop_primary_index(using_gsi=True,
                                         server=self.n1ql_node)
Beispiel #50
0
def set(key, value, bucket = "default", password = ""):
    """Store *value* under *key* in the given Couchbase *bucket*.

    Fixes two defects in the original:
    - it referenced ``self.bucket`` although this is a module-level function
      (NameError); the ``bucket`` parameter is used instead;
    - ``cb.set({key: value})`` passed a dict as the document key; the SDK's
      ``set(key, value)`` signature is used instead.
    The unused ``password`` parameter is kept for backward compatibility.
    """
    cb = Bucket(host=cfg.COUCHBASE_IP + "/" + bucket)
    cb.set(key, value)
def test_mobile_opt_in(params_from_base_test_setup, sg_conf_name):
    """
    Scenario: Enable mobile opt in sync function in sync-gateway configuration file
    - Check xattrs/mobile-opt-in_cc or di json files
    - 8 cases covered
    - doc : https://docs.google.com/document/d/1XxLIBsjuj_UxTTJs4Iu7C7uZdos8ZEzeckrVc17y3sw/edit
    - #1 Create doc via sdk with mobile opt in and verify doc is imported
    - #2 Create doc via sdk with mobile opt out and verify doc is not imported
    - #3 Create doc via sg with mobile opt in and update via sdk and verify doc is imported
    - #4 Create doc via sg with mobile opt out and update via sdk and verify doc is not imported
         - Try to update same doc via sg and verify 409 conflict error is thrown
         - Create a doc with same doc id and verify doc is created successfully
    - #5 Create doc via sg with mobile opt out and update via sdk which created no revisions
         - Now do sdk create with mobile opt in should import case #5
    - #6 Create doc via sg with mobile opt out  and update via sdk with opt in
         - Verify type is overrided and doc is imported
    - #7 Create doc via sg with mobile opt in  and update via sdk with opt out
         - Verify type is overrided and doc is not imported
    - #8 Disable import in the sg config and have mobile opt in function
         Create doc via sdk with mobile property and verify sg update succeeds
    - #9 Same config as #8 and have mobile opt in function in config
         Create doc via sdk without mobile property and create new doc via sg with same doc id and
         verify it succeeds
    """

    bucket_name = 'data-bucket'
    sg_db = 'db'

    cluster_conf = params_from_base_test_setup['cluster_config']
    cluster_topology = params_from_base_test_setup['cluster_topology']
    mode = params_from_base_test_setup['mode']
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    # This test should only run when using xattr meta storage
    if not xattrs_enabled:
        pytest.skip('XATTR tests require --xattrs flag')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology['sync_gateways'][0]['admin']
    sg_url = cluster_topology['sync_gateways'][0]['public']
    cbs_url = cluster_topology['couchbase_servers'][0]

    log_info('sg_conf: {}'.format(sg_conf))
    log_info('sg_admin_url: {}'.format(sg_admin_url))
    log_info('sg_url: {}'.format(sg_url))
    log_info('cbs_url: {}'.format(cbs_url))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Create clients
    sg_client = MobileRestClient()
    cbs_ip = host_for_url(cbs_url)
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name), password='******', timeout=SDK_TIMEOUT)

    # Create user / session
    auto_user_info = UserInfo(name='autotest', password='******', channels=['mobileOptIn'], roles=[])
    sg_client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=auto_user_info.name,
        password=auto_user_info.password,
        channels=auto_user_info.channels
    )

    test_auth_session = sg_client.create_session(
        url=sg_admin_url,
        db=sg_db,
        name=auto_user_info.name,
        password=auto_user_info.password
    )

    # Property generator for docs that opt IN to mobile sync (type == 'mobile')
    def update_mobile_prop():
        return {
            'updates': 0,
            'type': 'mobile',
        }

    # Property generator for docs that opt OUT of mobile sync
    def update_non_mobile_prop():
        return {
            'updates': 0,
            'test': 'true',
            'type': 'mobile opt out',
        }

    # Create first doc via SDK with type mobile. Case #1
    doc_id1 = 'mobile_opt_in_sdk_doc'
    doc = document.create_doc(doc_id=doc_id1, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    sdk_client.upsert(doc_id1, doc)
    sg_get_doc1 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id1, auth=test_auth_session)
    assert sg_get_doc1['_rev'].startswith('1-') and sg_get_doc1['_id'] == doc_id1
    # Additional coverage for case #1
    sg_client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id1, number_updates=1, auth=test_auth_session)
    sg_get_doc1 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id1, auth=test_auth_session)
    assert sg_get_doc1['_rev'].startswith('2-') and sg_get_doc1['_id'] == doc_id1

    # Create second doc via SDK with type non mobile. Case #2
    doc_id2 = 'mobile_opt_out_sdk_doc'
    doc = document.create_doc(doc_id=doc_id2, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    sdk_client.upsert(doc_id2, doc)
    with pytest.raises(HTTPError) as he:
        sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id2, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('404 Client Error: Not Found for url:')

    # Create third sg doc with mobile opt in  and update via sdk. Case #3
    doc_id3 = 'mobile_opt_in_sg_doc'
    doc_body = document.create_doc(doc_id=doc_id3, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    sg_get_doc3 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id3, auth=test_auth_session)
    sg_get_doc3["updated_sdk_via_sg"] = "1"
    sdk_client.upsert(doc_id3, sg_get_doc3)
    sg_get_doc3 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id3, auth=test_auth_session)
    assert sg_get_doc3['_rev'].startswith('2-') and sg_get_doc3['_id'] == doc_id3
    log_info("sg get doc3 is {}".format(sg_get_doc3))

    # Create fourth sg doc with mobile opt out and update via sdk. Case #4 and case #8
    doc_id4 = 'mobile_opt_out_sg_doc'
    doc_body = document.create_doc(doc_id=doc_id4, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    # update via SDK
    sg_get_doc4 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id4, auth=test_auth_session)
    rev = sg_get_doc4['_rev']
    sg_get_doc4["updated_sdk_via_sg"] = "1"
    sdk_client.upsert(doc_id4, sg_get_doc4)
    with pytest.raises(HTTPError) as he:
        sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id4, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('404 Client Error: Not Found for url:')
    # update via SG
    with pytest.raises(HTTPError) as he:
        sg_client.put_doc(url=sg_url, db=sg_db, doc_id=doc_id4, doc_body={'sg_rewrite': 'True'}, rev=rev, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('409 Client Error: Conflict for url:')
    # Create same doc again to verify there is not existing key error covers case #8
    doc_body = document.create_doc(doc_id=doc_id4, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    sg_get_doc4_1 = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    log_info("4th doc after recreate vis sg is {}".format(sg_get_doc4_1))
    assert sg_get_doc4_1['rev'].startswith('1-') and sg_get_doc4_1['id'] == doc_id4

    # Create Fifth sg doc with mobile opt in and delete doc which created no revisions i.e tombstone doc
    # Now do sdk create with mobile opt in should import case #5
    doc_id5 = 'mobile_sdk_recreate_no_activerev'
    doc_body = document.create_doc(doc_id=doc_id5, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    rev = doc['rev']
    sg_client.delete_doc(url=sg_url, db=sg_db, doc_id=doc_id5, rev=rev, auth=test_auth_session)
    # At this point no active revisions for this doc, so now update via sdk with mobile opt in should be successful
    # in getting doc
    doc = document.create_doc(doc_id=doc_id5, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    sdk_client.upsert(doc_id5, doc)
    sg_get_doc5 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id5, auth=test_auth_session)
    log_info("sg get doc 5 is {}".format(sg_get_doc5))
    assert sg_get_doc5['_rev'].startswith('1-') and sg_get_doc5['_id'] == doc_id5

    # Create sixth sg doc with mobile opt out  and update via sdk with opt in. Case #6
    doc_id6 = 'mobileoptout_sg_doc_sdkupdate_optin'
    doc_body = document.create_doc(doc_id=doc_id6, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    sg_get_doc6 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id6, auth=test_auth_session)
    log_info("Sg sixth doc is {}".format(sg_get_doc6))
    sg_get_doc6["type"] = "mobile"
    sdk_client.upsert(doc_id6, sg_get_doc6)
    sg_get_doc6 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id6, auth=test_auth_session)
    assert sg_get_doc6['_rev'].startswith('2-') and sg_get_doc6['_id'] == doc_id6

    # Create seventh sg doc with mobile opt in  and update via sdk with opt out. Case #7
    doc_id7 = 'mobileoptin_sg_doc_sdkupdate_optout'
    doc_body = document.create_doc(doc_id=doc_id7, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    sg_get_doc7 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id7, auth=test_auth_session)
    # NOTE(review): the log message says "sixth" but this is the seventh doc.
    log_info("Sg sixth doc is {}".format(sg_get_doc7))
    sg_get_doc7["type"] = "mobile opt out"
    sdk_client.upsert(doc_id7, sg_get_doc7)
    with pytest.raises(HTTPError) as he:
        sg_get_doc7 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id7, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('404 Client Error: Not Found for url:')
    # TODO : verify _changes that it shows tombstone revisions -> it will happen on 2.0

    # Create eighth sdk doc with import disabled and add mobile property and update via sg. Case #8
    sg_conf_name = "xattrs/mobile_opt_in_no_import"
    sg_no_import_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_util = SyncGateway()
    sg_util.start_sync_gateways(cluster_config=cluster_conf, url=sg_url, config=sg_no_import_conf)

    doc_id8 = 'mobile_opt_in_sg_rewrite_with_importdisabled'
    doc_body = document.create_doc(doc_id=doc_id8, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    sdk_client.upsert(doc_id8, doc_body)
    with pytest.raises(HTTPError) as he:
        sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('409 Client Error: Conflict for url:')
    sg_client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id8, number_updates=1, auth=test_auth_session)
    sg_get_doc8 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id8, auth=test_auth_session)
    assert sg_get_doc8['_rev'].startswith('2-') and sg_get_doc8['_id'] == doc_id8

    # Create ninth sdk doc with import disabled and add mobile property and update via sg. Case #9
    doc_id9 = 'mobile_opt_out_sg_rewrite_with_importdisabled'
    doc_body = document.create_doc(doc_id=doc_id9, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    sdk_client.upsert(doc_id9, doc_body)
    sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    # sg_client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id8, number_updates=1, auth=test_auth_session)
    sg_get_doc9 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id9, auth=test_auth_session)
    assert sg_get_doc9['_rev'].startswith('1-') and sg_get_doc9['_id'] == doc_id9
Beispiel #52
0
def get(key, bucket = "default", password = ""):
    """Fetch *key* from the given Couchbase *bucket* and return the result.

    Fixes the original's NameError: it referenced ``self.bucket`` although
    this is a module-level function with no ``self``; the ``bucket``
    parameter is used instead. The unused ``password`` parameter is kept for
    backward compatibility.
    """
    cb = Bucket(host=cfg.COUCHBASE_IP + "/" + bucket)
    rc = cb.get(key)
    return rc
class SDKClient(object):
    """Python SDK Client Implementation for testrunner - master branch Implementation"""
    def __init__(self,
                 bucket,
                 hosts=["localhost"],
                 scheme="couchbase",
                 ssl_path=None,
                 uhm_options=None,
                 password=None,
                 quiet=True,
                 certpath=None,
                 transcoder=None):
        """Build the connection string, open the bucket connection, and
        install the stdlib json converters used by the SDK."""
        self.password = password
        self.quiet = quiet
        self.transcoder = transcoder
        self.default_timeout = 0
        self.connection_string = self._createString(
            scheme=scheme, bucket=bucket, hosts=hosts,
            certpath=certpath, uhm_options=uhm_options)
        self._createConn()
        couchbase.set_json_converters(json.dumps, json.loads)

    def _createString(self,
                      scheme="couchbase",
                      bucket=None,
                      hosts=["localhost"],
                      certpath=None,
                      uhm_options=""):
        connection_string = "{0}://{1}".format(
            scheme, ", ".join(hosts).replace(" ", ""))
        if bucket != None:
            connection_string = "{0}/{1}".format(connection_string, bucket)
        if uhm_options != None:
            connection_string = "{0}?{1}".format(connection_string,
                                                 uhm_options)
        if scheme == "couchbases":
            if "?" in connection_string:
                connection_string = "{0},certpath={1}".format(
                    connection_string, certpath)
            else:
                connection_string = "{0}?certpath={1}".format(
                    connection_string, certpath)
        return connection_string

    def _createConn(self):
        """Open the underlying CouchbaseBucket connection and record its
        default timeout. Connection errors propagate to the caller (the
        original's except clause only re-raised)."""
        self.cb = CouchbaseBucket(self.connection_string,
                                  password=self.password,
                                  quiet=self.quiet,
                                  transcoder=self.transcoder)
        self.default_timeout = self.cb.timeout

    def reconnect(self):
        """Close the current connection and open a fresh one."""
        self.cb.close()
        self._createConn()

    def close(self):
        """Close the underlying connection.

        NOTE(review): this calls the SDK's private ``_close()`` rather than
        the public ``close()`` -- confirm that is intentional.
        """
        self.cb._close()

    def counter_in(self,
                   key,
                   path,
                   delta,
                   create_parents=True,
                   cas=0,
                   ttl=0,
                   persist_to=0,
                   replicate_to=0):
        try:
            return self.cb.counter_in(key,
                                      path,
                                      delta,
                                      create_parents=create_parents,
                                      cas=cas,
                                      ttl=ttl,
                                      persist_to=persist_to,
                                      replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def arrayappend_in(self,
                       key,
                       path,
                       value,
                       create_parents=True,
                       cas=0,
                       ttl=0,
                       persist_to=0,
                       replicate_to=0):
        try:
            return self.cb.arrayappend_in(key,
                                          path,
                                          value,
                                          create_parents=create_parents,
                                          cas=cas,
                                          ttl=ttl,
                                          persist_to=persist_to,
                                          replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def arrayprepend_in(self,
                        key,
                        path,
                        value,
                        create_parents=True,
                        cas=0,
                        ttl=0,
                        persist_to=0,
                        replicate_to=0):
        try:
            return self.cb.arrayprepend_in(key,
                                           path,
                                           value,
                                           create_parents=create_parents,
                                           cas=cas,
                                           ttl=ttl,
                                           persist_to=persist_to,
                                           replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def arrayaddunique_in(self,
                          key,
                          path,
                          value,
                          create_parents=True,
                          cas=0,
                          ttl=0,
                          persist_to=0,
                          replicate_to=0):
        try:
            return self.cb.addunique_in(key,
                                        path,
                                        value,
                                        create_parents=create_parents,
                                        cas=cas,
                                        ttl=ttl,
                                        persist_to=persist_to,
                                        replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def arrayinsert_in(self,
                       key,
                       path,
                       value,
                       cas=0,
                       ttl=0,
                       persist_to=0,
                       replicate_to=0):
        try:
            return self.cb.arrayinsert_in(key,
                                          path,
                                          value,
                                          cas=cas,
                                          ttl=ttl,
                                          persist_to=persist_to,
                                          replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def remove_in(self, key, path, cas=0, ttl=0):
        try:
            self.cb.remove_in(key, path, cas=cas, ttl=ttl)
        except CouchbaseError as e:
            raise

    def mutate_in(self, key, *specs, **kwargs):
        try:
            self.cb.mutate_in(key, *specs, **kwargs)
        except CouchbaseError as e:
            raise

    def lookup_in(self, key, *specs, **kwargs):
        try:
            self.cb.lookup_in(key, *specs, **kwargs)
        except CouchbaseError as e:
            raise

    def get_in(self, key, path):
        try:
            result = self.cb.get_in(key, path)
            return self.__translate_get(result)
        except CouchbaseError as e:
            raise

    def exists_in(self, key, path):
        try:
            self.cb.exists_in(key, path)
        except CouchbaseError as e:
            raise

    def replace_in(self,
                   key,
                   path,
                   value,
                   cas=0,
                   ttl=0,
                   persist_to=0,
                   replicate_to=0):
        try:
            return self.cb.replace_in(key,
                                      path,
                                      value,
                                      cas=cas,
                                      ttl=ttl,
                                      persist_to=persist_to,
                                      replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def insert_in(self,
                  key,
                  path,
                  value,
                  create_parents=True,
                  cas=0,
                  ttl=0,
                  persist_to=0,
                  replicate_to=0):
        try:
            return self.cb.insert_in(key,
                                     path,
                                     value,
                                     create_parents=create_parents,
                                     cas=cas,
                                     ttl=ttl,
                                     persist_to=persist_to,
                                     replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def upsert_in(self,
                  key,
                  path,
                  value,
                  create_parents=True,
                  cas=0,
                  ttl=0,
                  persist_to=0,
                  replicate_to=0):
        try:
            return self.cb.upsert_in(key,
                                     path,
                                     value,
                                     create_parents=create_parents,
                                     cas=cas,
                                     ttl=ttl,
                                     persist_to=persist_to,
                                     replicate_to=replicate_to)
        except CouchbaseError as e:
            raise

    def append(self,
               key,
               value,
               cas=0,
               format=None,
               persist_to=0,
               replicate_to=0):
        try:
            self.cb.append(key,
                           value,
                           cas=cas,
                           format=format,
                           persist_to=persist_to,
                           replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.append(key,
                               value,
                               cas=cas,
                               format=format,
                               persist_to=persist_to,
                               replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def append_multi(self,
                     keys,
                     cas=0,
                     format=None,
                     persist_to=0,
                     replicate_to=0):
        try:
            self.cb.append_multi(keys,
                                 cas=cas,
                                 format=format,
                                 persist_to=persist_to,
                                 replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.append_multi(keys,
                                     cas=cas,
                                     format=format,
                                     persist_to=persist_to,
                                     replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def prepend(self,
                key,
                value,
                cas=0,
                format=None,
                persist_to=0,
                replicate_to=0):
        try:
            self.cb.prepend(key,
                            value,
                            cas=cas,
                            format=format,
                            persist_to=persist_to,
                            replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                self.cb.prepend(key,
                                value,
                                cas=cas,
                                format=format,
                                persist_to=persist_to,
                                replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def prepend_multi(self,
                      keys,
                      cas=0,
                      format=None,
                      persist_to=0,
                      replicate_to=0):
        try:
            self.cb.prepend_multi(keys,
                                  cas=cas,
                                  format=format,
                                  persist_to=persist_to,
                                  replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.prepend_multi(keys,
                                      cas=cas,
                                      format=format,
                                      persist_to=persist_to,
                                      replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def replace(self,
                key,
                value,
                cas=0,
                ttl=0,
                format=None,
                persist_to=0,
                replicate_to=0):
        try:
            self.cb.replace(key,
                            value,
                            cas=cas,
                            ttl=ttl,
                            format=format,
                            persist_to=persist_to,
                            replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.replace(key,
                                value,
                                cas=cas,
                                ttl=ttl,
                                format=format,
                                persist_to=persist_to,
                                replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def replace_multi(self,
                      keys,
                      cas=0,
                      ttl=0,
                      format=None,
                      persist_to=0,
                      replicate_to=0):
        try:
            self.cb.replace_multi(keys,
                                  cas=cas,
                                  ttl=ttl,
                                  format=format,
                                  persist_to=persist_to,
                                  replicate_to=replicate_to)
        except CouchbaseError as e:
            try:
                time.sleep(10)
                self.cb.replace_multi(keys,
                                      cas=cas,
                                      ttl=ttl,
                                      format=format,
                                      persist_to=persist_to,
                                      replicate_to=replicate_to)
            except CouchbaseError as e:
                raise

    def cas(self, key, value, cas=0, ttl=0, format=None):
        return self.cb.replace(key, value, cas=cas, format=format)

    def delete(self, key, cas=0, quiet=True, persist_to=0, replicate_to=0):
        """Delete document *key* via :meth:`remove`.

        NOTE(review): this definition is shadowed by a second ``delete``
        defined later in the class (with a ``keys`` signature), so it is
        effectively dead code -- consider removing one of the two.
        """
        self.remove(key,
                    cas=cas,
                    quiet=quiet,
                    persist_to=persist_to,
                    replicate_to=replicate_to)

    def remove(self, key, cas=0, quiet=True, persist_to=0, replicate_to=0):
        """Remove *key* from the bucket, retrying once after 10s.

        Returns the SDK operation result of the successful attempt.
        """
        for attempt in (1, 2):
            try:
                return self.cb.remove(key,
                                      cas=cas,
                                      quiet=quiet,
                                      persist_to=persist_to,
                                      replicate_to=replicate_to)
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def delete(self, keys, quiet=True, persist_to=0, replicate_to=0):
        """Remove several documents at once.

        Fix: the original body called ``self.remove(self, keys, ...)``,
        passing ``self`` as the key and ``keys`` as the CAS value, which
        could never work.  It now delegates to :meth:`remove_multi`.
        NOTE(review): this definition shadows the single-key ``delete``
        above; it should probably be named ``delete_multi``.
        """
        return self.remove_multi(keys,
                                 quiet=quiet,
                                 persist_to=persist_to,
                                 replicate_to=replicate_to)

    def remove_multi(self, keys, quiet=True, persist_to=0, replicate_to=0):
        """Remove a collection of keys, retrying once after a 10s pause."""
        for attempt in (1, 2):
            try:
                self.cb.remove_multi(keys,
                                     quiet=quiet,
                                     persist_to=persist_to,
                                     replicate_to=replicate_to)
                return
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def set(self,
            key,
            value,
            cas=0,
            ttl=0,
            format=None,
            persist_to=0,
            replicate_to=0):
        """Store *value* under *key* (compatibility alias for upsert)."""
        options = dict(cas=cas,
                       ttl=ttl,
                       format=format,
                       persist_to=persist_to,
                       replicate_to=replicate_to)
        return self.upsert(key, value, **options)

    def upsert(self,
               key,
               value,
               cas=0,
               ttl=0,
               format=None,
               persist_to=0,
               replicate_to=0):
        """Insert or replace *key*; retry once after 10s on CouchbaseError.

        Arguments are forwarded positionally to the SDK, matching the
        original call signature exactly.
        """
        for attempt in (1, 2):
            try:
                return self.cb.upsert(key, value, cas, ttl, format,
                                      persist_to, replicate_to)
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def set_multi(self,
                  keys,
                  ttl=0,
                  format=None,
                  persist_to=0,
                  replicate_to=0):
        """Bulk store (compatibility alias for :meth:`upsert_multi`)."""
        options = dict(ttl=ttl,
                       format=format,
                       persist_to=persist_to,
                       replicate_to=replicate_to)
        return self.upsert_multi(keys, **options)

    def upsert_multi(self,
                     keys,
                     ttl=0,
                     format=None,
                     persist_to=0,
                     replicate_to=0):
        """Insert-or-replace a batch of documents, retrying once after 10s."""
        for attempt in (1, 2):
            try:
                self.cb.upsert_multi(keys,
                                     ttl=ttl,
                                     format=format,
                                     persist_to=persist_to,
                                     replicate_to=replicate_to)
                return
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def insert(self,
               key,
               value,
               ttl=0,
               format=None,
               persist_to=0,
               replicate_to=0):
        """Insert a brand-new document; retry once after a 10s pause."""
        for attempt in (1, 2):
            try:
                self.cb.insert(key,
                               value,
                               ttl=ttl,
                               format=format,
                               persist_to=persist_to,
                               replicate_to=replicate_to)
                return
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def insert_multi(self,
                     keys,
                     ttl=0,
                     format=None,
                     persist_to=0,
                     replicate_to=0):
        """Insert a batch of brand-new documents, retrying once after 10s."""
        for attempt in (1, 2):
            try:
                self.cb.insert_multi(keys,
                                     ttl=ttl,
                                     format=format,
                                     persist_to=persist_to,
                                     replicate_to=replicate_to)
                return
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def touch(self, key, ttl=0):
        """Refresh the expiry of *key*; retry once after a 10s pause."""
        for attempt in (1, 2):
            try:
                self.cb.touch(key, ttl=ttl)
                return
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def touch_multi(self, keys, ttl=0):
        """Refresh the expiry of several keys; retry once after 10s."""
        for attempt in (1, 2):
            try:
                self.cb.touch_multi(keys, ttl=ttl)
                return
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def decr(self, key, delta=1, initial=None, ttl=0):
        """Decrement the counter at *key* by *delta*.

        Fix: the result of :meth:`counter` is now returned instead of
        being discarded.
        """
        return self.counter(key, delta=-delta, initial=initial, ttl=ttl)

    def decr_multi(self, keys, delta=1, initial=None, ttl=0):
        """Decrement several counters by *delta*.

        Fix: the result of :meth:`counter_multi` is now returned instead
        of being discarded.
        """
        return self.counter_multi(keys, delta=-delta, initial=initial, ttl=ttl)

    def incr(self, key, delta=1, initial=None, ttl=0):
        """Increment the counter at *key* by *delta*.

        Fix: the result of :meth:`counter` is now returned instead of
        being discarded.
        """
        return self.counter(key, delta=delta, initial=initial, ttl=ttl)

    def incr_multi(self, keys, delta=1, initial=None, ttl=0):
        """Increment several counters by *delta*.

        Fix: the result of :meth:`counter_multi` is now returned instead
        of being discarded.
        """
        return self.counter_multi(keys, delta=delta, initial=initial, ttl=ttl)

    def counter(self, key, delta=1, initial=None, ttl=0):
        """Atomically adjust the numeric document at *key* by *delta*.

        Retries once after a 10 second pause on CouchbaseError.
        Fix: the operation result is now returned instead of discarded.
        """
        try:
            return self.cb.counter(key, delta=delta, initial=initial, ttl=ttl)
        except CouchbaseError:
            time.sleep(10)
            # Second failure propagates naturally to the caller.
            return self.cb.counter(key, delta=delta, initial=initial, ttl=ttl)

    def counter_multi(self, keys, delta=1, initial=None, ttl=0):
        """Adjust several numeric documents by *delta* in one batch.

        Retries once after a 10 second pause on CouchbaseError.
        Fix: the result map is now returned instead of discarded.
        """
        try:
            return self.cb.counter_multi(keys,
                                         delta=delta,
                                         initial=initial,
                                         ttl=ttl)
        except CouchbaseError:
            time.sleep(10)
            # Second failure propagates naturally to the caller.
            return self.cb.counter_multi(keys,
                                         delta=delta,
                                         initial=initial,
                                         ttl=ttl)

    def get(self, key, ttl=0, quiet=True, replica=False, no_format=False):
        """Fetch *key* and return the translated (flags, cas, value) triple.

        Retries once after a 10 second pause on CouchbaseError.
        """
        for attempt in (1, 2):
            try:
                result = self.cb.get(key,
                                     ttl=ttl,
                                     quiet=quiet,
                                     replica=replica,
                                     no_format=no_format)
                return self.__translate_get(result)
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def rget(self, key, replica_index=None, quiet=True):
        """Read *key* from a replica node.

        Fixes: the original called ``self.rget`` recursively, producing
        infinite recursion instead of contacting the server, and it
        hard-coded ``quiet=None``, ignoring the caller's *quiet* argument.
        Retries once after a 10 second pause on CouchbaseError.
        """
        try:
            data = self.cb.rget(key, replica_index=replica_index, quiet=quiet)
            return self.__translate_get(data)
        except CouchbaseError:
            time.sleep(10)
            data = self.cb.rget(key, replica_index=replica_index, quiet=quiet)
            return self.__translate_get(data)

    def get_multi(self,
                  keys,
                  ttl=0,
                  quiet=True,
                  replica=False,
                  no_format=False):
        """Fetch several keys, returning {key: [flags, cas, value]}.

        Retries once after a 10 second pause on CouchbaseError.
        """
        for attempt in (1, 2):
            try:
                results = self.cb.get_multi(keys,
                                            ttl=ttl,
                                            quiet=quiet,
                                            replica=replica,
                                            no_format=no_format)
                return self.__translate_get_multi(results)
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def rget_multi(self, key, replica_index=None, quiet=True):
        """Replica-read several keys and translate the result map.

        Fix: the original hard-coded ``replica_index=None`` in both calls,
        silently ignoring the caller's *replica_index* argument; it is now
        forwarded.  Retries once after a 10 second pause.
        """
        try:
            data = self.cb.rget_multi(key,
                                      replica_index=replica_index,
                                      quiet=quiet)
            return self.__translate_get_multi(data)
        except CouchbaseError:
            time.sleep(10)
            data = self.cb.rget_multi(key,
                                      replica_index=replica_index,
                                      quiet=quiet)
            return self.__translate_get_multi(data)

    def stats(self, keys=None):
        """Fetch server statistics, retrying once after a 10s pause."""
        for attempt in (1, 2):
            try:
                return self.cb.stats(keys=keys)
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def errors(self, clear_existing=True):
        """Return (and optionally clear) queued asynchronous errors.

        Fix: removed a try/except block that only re-raised the caught
        CouchbaseError — a no-op wrapper with identical behavior.
        """
        return self.cb.errors(clear_existing=clear_existing)

    def observe(self, key, master_only=False):
        """Query the persistence/replication state of *key*.

        Retries once after a 10 second pause on CouchbaseError.
        """
        for attempt in (1, 2):
            try:
                return self.cb.observe(key, master_only=master_only)
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def observe_multi(self, keys, master_only=False):
        """Observe several keys and translate the result map.

        Retries once after a 10 second pause on CouchbaseError.
        """
        for attempt in (1, 2):
            try:
                results = self.cb.observe_multi(keys, master_only=master_only)
                return self.__translate_observe_multi(results)
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def endure(self,
               key,
               persist_to=-1,
               replicate_to=-1,
               cas=0,
               check_removed=False,
               timeout=5.0,
               interval=0.010):
        """Block until *key* satisfies the durability requirements.

        Retries once after a 10 second pause on CouchbaseError.
        """
        options = dict(persist_to=persist_to,
                       replicate_to=replicate_to,
                       cas=cas,
                       check_removed=check_removed,
                       timeout=timeout,
                       interval=interval)
        for attempt in (1, 2):
            try:
                self.cb.endure(key, **options)
                return
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def endure_multi(self,
                     keys,
                     persist_to=-1,
                     replicate_to=-1,
                     cas=0,
                     check_removed=False,
                     timeout=5.0,
                     interval=0.010):
        """Block until every key in *keys* satisfies the durability goals.

        Fix: the original called the single-key ``self.cb.endure`` with a
        collection of keys; it now uses the SDK's ``endure_multi``.
        Retries once after a 10 second pause on CouchbaseError.
        """
        options = dict(persist_to=persist_to,
                       replicate_to=replicate_to,
                       cas=cas,
                       check_removed=check_removed,
                       timeout=timeout,
                       interval=interval)
        try:
            self.cb.endure_multi(keys, **options)
        except CouchbaseError:
            time.sleep(10)
            # Second failure propagates naturally to the caller.
            self.cb.endure_multi(keys, **options)

    def lock(self, key, ttl=0):
        """Pessimistically lock *key* and return its translated value.

        Retries once after a 10 second pause on CouchbaseError.
        """
        for attempt in (1, 2):
            try:
                result = self.cb.lock(key, ttl=ttl)
                return self.__translate_get(result)
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def lock_multi(self, keys, ttl=0):
        """Lock several keys and return their translated values.

        Retries once after a 10 second pause on CouchbaseError.
        """
        for attempt in (1, 2):
            try:
                results = self.cb.lock_multi(keys, ttl=ttl)
                return self.__translate_get_multi(results)
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def unlock(self, key, ttl=0):
        """Release a pessimistic lock on *key*; retry once after 10s.

        NOTE(review): the *ttl* parameter is accepted but never forwarded
        to the SDK call — confirm whether it should be.
        """
        for attempt in (1, 2):
            try:
                return self.cb.unlock(key)
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def unlock_multi(self, keys):
        """Release pessimistic locks on several keys; retry once after 10s."""
        for attempt in (1, 2):
            try:
                return self.cb.unlock_multi(keys)
            except CouchbaseError:
                if attempt == 2:
                    raise
                time.sleep(10)

    def n1ql_query(self, statement, prepared=False):
        """Build a N1QLQuery object for *statement*.

        Fix: removed a try/except that only re-raised CouchbaseError — a
        no-op wrapper (object construction does not contact the server).
        NOTE(review): *prepared* is passed positionally to N1QLQuery —
        confirm it maps to the intended SDK constructor parameter.
        """
        return N1QLQuery(statement, prepared)

    def n1ql_request(self, query):
        """Build a N1QLRequest for *query* bound to this client's bucket.

        Fix: removed a try/except that only re-raised CouchbaseError — a
        no-op wrapper.
        """
        return N1QLRequest(query, self.cb)

    def __translate_get_multi(self, data):
        """Convert a multi-get result map into {key: [flags, cas, value]}.

        Fixes: no longer shadows the builtin ``map``, uses ``is None``
        instead of ``== None``, and builds the result with a dict
        comprehension instead of a manual loop.
        """
        if data is None:
            return {}
        return {key: [result.flags, result.cas, result.value]
                for key, result in data.items()}

    def __translate_get(self, data):
        """Unpack a single get result into a (flags, cas, value) triple."""
        return (data.flags, data.cas, data.value)

    def __translate_delete(self, data):
        """Pass a delete result through unchanged (identity hook)."""
        return data

    def __translate_observe(self, data):
        """Pass an observe result through unchanged (identity hook)."""
        return data

    def __translate_observe_multi(self, data):
        """Convert a multi-observe result map into {key: value}.

        Fixes: no longer shadows the builtin ``map``, uses ``is None``
        instead of ``== None``, and builds the result with a dict
        comprehension.
        """
        if data is None:
            return {}
        return {key: result.value for key, result in data.items()}

    def __translate_upsert_multi(self, data):
        """Copy a multi-upsert result map into a plain dict.

        Fixes: no longer shadows the builtin ``map`` and uses ``is None``
        instead of ``== None``; the manual copy loop is replaced by
        ``dict(data)``, which produces the same {key: result} mapping.
        """
        if data is None:
            return {}
        return dict(data)

    def __translate_upsert_op(self, data):
        """Unpack an upsert op result into (rc, success, errstr, key)."""
        return (data.rc, data.success, data.errstr, data.key)
Beispiel #54
0
import csv
import json
from couchbase.bucket import Bucket
from couchbase.exceptions import CouchbaseTransientError

from couchbase.bucket import Bucket

# Connect to the 'pysample' bucket on a hard-coded host with a 30s
# per-operation timeout.
cb = Bucket('couchbase://192.168.61.101/pysample?operation_timeout=30')

# for row in csv_o:
#     cb.upsert('{0}'.format(row['id']), row)
#     cb.upsert_multi({
#         '{0}'.format("ITEM1::" + row['id']) : {"shirt" : "location"} ,
#         '{0}'.format("ITEM2::" + row['id']) : {"pants" : "location"} ,
#         '{0}'.format("ITEM3::" + row['id']) : {"shoe" : "location"}
#     })


# State for batching rows before upload; x/y look like running counters.
batches = []
cur_batch = []
batch_size = 1000
x = 0
y = 0
# thebatch.append(cur_batch)

# Source data: Chicago crimes CSV, expected in the working directory.
csvfile = open('Crimes_-_2001_to_present.csv', 'r')
# jsonfile = open('file.json', 'w')

fieldnames = ("ID","Case Number","Date","Block","IUCR","Primary Type","Description","Location Description","Arrest","Domestic","Beat","District","Ward","Community Area","FBI Code","X Coordinate","Y Coordinate","Year","Updated On","Latitude","Longitude","Location")
# NOTE(review): csv.reader's second positional argument is a *dialect*,
# not a field-name list — this was probably meant to be
# csv.DictReader(csvfile, fieldnames). Confirm before relying on `reader`.
reader = csv.reader(csvfile, fieldnames)
Beispiel #55
0
class CBGen(CBAsyncGen):
    """Synchronous workload client wrapping the SDK ``Bucket``.

    Delegates the actual KV operations to the CBAsyncGen base class and
    layers project decorators on top: ``quiet``/``backoff``/``timeit``
    (defined elsewhere in the project — presumably error-suppression,
    retry, and latency measurement; confirm against their definitions).
    """

    TIMEOUT = 10  # seconds — per-operation client timeout applied below

    def __init__(self,
                 ssl_mode: str = 'none',
                 n1ql_timeout: int = None,
                 **kwargs):
        """Build the connection string and connect to the bucket.

        Expected kwargs: ``host``, ``bucket``, ``password`` and
        ``connstr_params`` (a dict of extra connection-string options).
        """
        connection_string = 'couchbase://{host}/{bucket}?password={password}&{params}'
        connstr_params = parse.urlencode(kwargs["connstr_params"])

        # 'data' SSL mode switches the scheme to couchbases:// and pins
        # the cluster certificate from root.pem.
        if ssl_mode == 'data':
            connection_string = connection_string.replace(
                'couchbase', 'couchbases')
            connection_string += '&certpath=root.pem'

        connection_string = connection_string.format(
            host=kwargs['host'],
            bucket=kwargs['bucket'],
            password=kwargs['password'],
            params=connstr_params)

        self.client = Bucket(connection_string=connection_string)
        self.client.timeout = self.TIMEOUT
        if n1ql_timeout:
            self.client.n1ql_timeout = n1ql_timeout
        logger.info("Connection string: {}".format(connection_string))

    @quiet
    @backoff
    def create(self, *args, **kwargs):
        """Create a document (delegates to CBAsyncGen.create)."""
        super().create(*args, **kwargs)

    @quiet
    @backoff
    def create_durable(self, *args, **kwargs):
        """Create a document with durability requirements."""
        super().create_durable(*args, **kwargs)

    @quiet
    @backoff
    @timeit
    def read(self, *args, **kwargs):
        """Read a document; latency is recorded by @timeit."""
        super().read(*args, **kwargs)

    @quiet
    @backoff
    @timeit
    def update(self, *args, **kwargs):
        """Update a document; latency is recorded by @timeit."""
        super().update(*args, **kwargs)

    @quiet
    @backoff
    @timeit
    def update_durable(self, *args, **kwargs):
        """Update a document with durability requirements."""
        super().update_durable(*args, **kwargs)

    @quiet
    def delete(self, *args, **kwargs):
        """Delete a document (errors handled by @quiet only — no retry)."""
        super().delete(*args, **kwargs)

    @timeit
    def view_query(self, ddoc: str, view: str, query: ViewQuery):
        """Run a map/reduce view query, draining all rows for timing."""
        tuple(self.client.query(ddoc, view, query=query))

    @quiet
    @timeit
    def n1ql_query(self, query: N1QLQuery):
        """Run a N1QL query, draining all rows for timing."""
        tuple(self.client.n1ql_query(query))
Beispiel #56
0
from couchbase.transcoder import Transcoder
from couchbase.bucket import Bucket


class ReverseTranscoder(Transcoder):
    """Transcoder that stores every key reversed on the server."""

    def encode_key(self, key):
        """Reverse *key* before applying the normal encoding."""
        flipped = key[::-1]
        return super(ReverseTranscoder, self).encode_key(flipped)

    def decode_key(self, key):
        """Undo the reversal after the normal decoding step."""
        decoded = super(ReverseTranscoder, self).decode_key(key)
        return decoded[::-1]


# Two clients on the same bucket: one that reverses keys transparently
# and one that sees the raw (reversed) keys as stored.
c_reversed = Bucket('couchbase://localhost/default',
                    transcoder=ReverseTranscoder())
c_plain = Bucket('couchbase://localhost/default')

# Clean up both spellings of the demo keys from earlier runs.
c_plain.remove_multi(('ABC', 'CBA', 'XYZ', 'ZYX'), quiet=True)

# Stored via the reversing client as 'ABC' -> server key 'CBA'.
c_reversed.upsert("ABC", "This is a reversed key")

# The plain client must therefore find it under 'CBA'...
rv = c_plain.get("CBA")
print("Got value for reversed key '{0}'".format(rv.value))

# ...while the reversing client still sees it as 'ABC'.
rv = c_reversed.get("ABC")
print("Got value for reversed key '{0}' again".format(rv.value))

# And the inverse: a plain 'ZYX' appears as 'XYZ' through the reverser.
c_plain.upsert("ZYX", "This is really ZYX")

rv = c_reversed.get("XYZ")
print("Got value for '{0}': '{1}'".format(rv.key, rv.value))
Beispiel #57
0
def delete(key, bucket="default", password=""):
    """Delete *key* from the given Couchbase *bucket*.

    Fixes: the original referenced ``self.bucket`` inside a module-level
    function (NameError — there is no ``self``), which also meant the
    *bucket* parameter was ignored; and the operation result was
    discarded.  NOTE(review): *password* is still unused — confirm
    whether the connection should authenticate with it.
    """
    cb = Bucket(host=cfg.COUCHBASE_IP + "/" + bucket)
    return cb.delete(key)
#!/usr/bin/env python
from couchbase.bucket import Bucket
import couchbase.subdocument as SD
import settings
import random
import time

# Cluster credentials and the target node come from the local settings
# module; only the first configured node is used.
bucket_name = settings.BUCKET_NAME
user = settings.USERNAME
password = settings.PASSWORD
node = settings.CLUSTER_NODES[0]

# Shared SDK connection used by the polling loop below.
SDK_CLIENT = Bucket('couchbase://{0}/{1}'.format(node, bucket_name),
                    username=user,
                    password=password)

# Per-operation timeout in seconds.
SDK_CLIENT.timeout = 15

while True:
    try:
        print "."
        results = SDK_CLIENT.n1ql_query(
            'SELECT symbol,price FROM {} WHERE symbol IS NOT MISSING AND price IS NOT MISSING'
            .format(bucket_name, ))
        for row in results:
            stock_key = "stock:" + (row['symbol'])
            # perturb the price and round it to 2 decimal places
            price_multiplier = random.normalvariate(1, 0.025)
            if row['symbol'] == "CBSE" and price_multiplier < 1:
                price_multiplier = 1
            new_price = float(row['price']) * price_multiplier
Beispiel #59
0
 def get_server_pool_db(self):
     """Open the QE-Test-Suites server-pool bucket and cache it on self."""
     conn_str = ('couchbase://' + SERVER_POOL_DB_HOST +
                 '/QE-Test-Suites?operation_timeout=60')
     self.bucket = Bucket(conn_str, lockmode=LOCKMODE_WAIT)
     return self.bucket
Beispiel #60
0
def main():
    global SERVER_MANAGER
    global TIMEOUT
    global SSH_NUM_RETRIES
    global SSH_POLL_INTERVAL

    usage = '%prog -s suitefile -v version -o OS'
    parser = OptionParser(usage)
    parser.add_option('-v', '--version', dest='version')
    parser.add_option(
        '-r', '--run',
        dest='run')  # run is ambiguous but it means 12 hour or weekly
    parser.add_option('-o', '--os', dest='os')
    parser.add_option('-n',
                      '--noLaunch',
                      action="store_true",
                      dest='noLaunch',
                      default=False)
    parser.add_option('-c', '--component', dest='component', default=None)
    parser.add_option('-p', '--poolId', dest='poolId', default='12hour')
    parser.add_option('-a', '--addPoolId', dest='addPoolId', default=None)
    parser.add_option('-t',
                      '--test',
                      dest='test',
                      default=False,
                      action='store_true')  # use the test Jenkins
    parser.add_option('-s',
                      '--subcomponent',
                      dest='subcomponent',
                      default=None)
    parser.add_option('-e',
                      '--extraParameters',
                      dest='extraParameters',
                      default=None)
    parser.add_option('-y', '--serverType', dest='serverType',
                      default='VM')  # or could be Docker
    parser.add_option('-u', '--url', dest='url', default=None)
    parser.add_option('-j', '--jenkins', dest='jenkins', default=None)
    parser.add_option('-b', '--branch', dest='branch', default='master')
    parser.add_option('-g', '--cherrypick', dest='cherrypick', default=None)
    # whether to use production version of a test_suite_executor or test version
    parser.add_option('-l',
                      '--launch_job',
                      dest='launch_job',
                      default='test_suite_executor')
    parser.add_option('-f',
                      '--jenkins_server_url',
                      dest='jenkins_server_url',
                      default='http://qa.sc.couchbase.com')
    parser.add_option('-m', '--rerun_params', dest='rerun_params', default='')
    parser.add_option('-i', '--retries', dest='retries', default='1')
    parser.add_option('-q',
                      '--fresh_run',
                      dest='fresh_run',
                      default=False,
                      action='store_true')
    parser.add_option('-k',
                      '--include_tests',
                      dest='include_tests',
                      default=None)
    parser.add_option('-x',
                      '--server_manager',
                      dest='SERVER_MANAGER',
                      default='172.23.105.177:8081')
    parser.add_option('-z', '--timeout', dest='TIMEOUT', default='60')
    parser.add_option('-w', '--check_vm', dest='check_vm', default="False")
    parser.add_option('--ssh_poll_interval',
                      dest='SSH_POLL_INTERVAL',
                      default="20")
    parser.add_option('--ssh_num_retries', dest='SSH_NUM_RETRIES', default="3")

    # set of parameters for testing purposes.
    #TODO: delete them after successful testing

    # dashboardReportedParameters is of the form param1=abc,param2=def
    parser.add_option('-d',
                      '--dashboardReportedParameters',
                      dest='dashboardReportedParameters',
                      default=None)

    options, args = parser.parse_args()

    #Fix the OS for addPoolServers. See CBQE-5609 for details
    addPoolServer_os = "centos"
    print(('the run is', options.run))
    print(('the  version is', options.version))
    releaseVersion = float('.'.join(options.version.split('.')[:2]))
    print(('release version is', releaseVersion))

    print(('nolaunch', options.noLaunch))
    print(('os', options.os))
    # print('url', options.url)

    print(('url is', options.url))
    print(('cherrypick command is', options.cherrypick))

    print(('the reportedParameters are', options.dashboardReportedParameters))

    print(('rerun params are', options.rerun_params))
    print(('Server Manager is ', options.SERVER_MANAGER))
    print(('Timeout is ', options.TIMEOUT))

    if options.SERVER_MANAGER:
        SERVER_MANAGER = options.SERVER_MANAGER
    if options.TIMEOUT:
        TIMEOUT = int(options.TIMEOUT)
    if options.SSH_POLL_INTERVAL:
        SSH_POLL_INTERVAL = int(options.SSH_POLL_INTERVAL)
    if options.SSH_NUM_RETRIES:
        SSH_NUM_RETRIES = int(options.SSH_NUM_RETRIES)

    # What do we do with any reported parameters?
    # 1. Append them to the extra (testrunner) parameters
    # 2. Append the right hand of the equals sign to the subcomponent to make a report descriptor

    if options.extraParameters is None:
        if options.dashboardReportedParameters is None:
            runTimeTestRunnerParameters = None
        else:
            runTimeTestRunnerParameters = options.dashboardReportedParameters
    else:
        runTimeTestRunnerParameters = options.extraParameters
        if options.dashboardReportedParameters is not None:
            runTimeTestRunnerParameters = options.extraParameters + ',' + options.dashboardReportedParameters

    # f = open(options.suiteFile)
    # data = f.readlines()

    testsToLaunch = []

    # for d in data:
    #  fields = d.split()
    #  testsToLaunch.append( {'descriptor':fields[0],'confFile':fields[1],'iniFile':fields[2],
    #                         'serverCount':int(fields[3]), 'timeLimit':int(fields[4]),
    #                          'parameters':fields[5]})

    cb = Bucket('couchbase://' + TEST_SUITE_DB + '/QE-Test-Suites')

    if options.run == "12hr_weekly":
        suiteString = "('12hour' in partOf or 'weekly' in partOf)"
    else:
        suiteString = "'" + options.run + "' in partOf"

    if options.component is None or options.component == 'None':
        queryString = "select * from `QE-Test-Suites` where " + suiteString + " order by component"
    else:
        if options.subcomponent is None or options.subcomponent == 'None':
            splitComponents = options.component.split(',')
            componentString = ''
            for i in range(len(splitComponents)):
                componentString = componentString + "'" + splitComponents[
                    i] + "'"
                if i < len(splitComponents) - 1:
                    componentString = componentString + ','

            queryString = "select * from `QE-Test-Suites` where {0} and component in [{1}] order by component;".format(
                suiteString, componentString)

        else:
            # have a subcomponent, assume only 1 component

            splitSubcomponents = options.subcomponent.split(',')
            subcomponentString = ''
            for i in range(len(splitSubcomponents)):
                print(('subcomponentString is', subcomponentString))
                subcomponentString = subcomponentString + "'" + splitSubcomponents[
                    i] + "'"
                if i < len(splitSubcomponents) - 1:
                    subcomponentString = subcomponentString + ','
            queryString = "select * from `QE-Test-Suites` where {0} and component in ['{1}'] and subcomponent in [{2}];". \
                format(suiteString, options.component, subcomponentString)

    print(
        ('the query is', queryString))  # .format(options.run, componentString)
    query = N1QLQuery(queryString)
    results = cb.n1ql_query(queryString)

    framework = None
    for row in results:
        try:
            data = row['QE-Test-Suites']
            data['config'] = data['config'].rstrip(
            )  # trailing spaces causes problems opening the files
            print(('row', data))

            # check any os specific
            if 'os' not in data or (data['os'] == options.os) or \
                    (data['os'] == 'linux' and options.os in {'centos', 'ubuntu'}):

                # and also check for which release it is implemented in
                # (a record without 'implementedIn' applies to every release)
                if 'implementedIn' not in data or releaseVersion >= float(
                        data['implementedIn']):
                    if 'jenkins' in data:
                        # then this is sort of a special case, launch the old style Jenkins job
                        # not implemented yet
                        print(('Old style Jenkins', data['jenkins']))
                    else:
                        # Fill in defaults for optional fields of the test
                        # record before queueing it for dispatch.
                        if 'initNodes' in data:
                            # stored as a string in the record; normalize to bool
                            initNodes = data['initNodes'].lower() == 'true'
                        else:
                            initNodes = True
                        if 'installParameters' in data:
                            installParameters = data['installParameters']
                        else:
                            # the literal string 'None' (not the None object)
                            # is what the downstream job expects
                            installParameters = 'None'
                        if 'slave' in data:
                            slave = data['slave']
                        else:
                            slave = 'P0'
                        if 'owner' in data:
                            owner = data['owner']
                        else:
                            owner = 'QE'
                        if 'mailing_list' in data:
                            mailing_list = data['mailing_list']
                        else:
                            mailing_list = '*****@*****.**'
                        if 'mode' in data:
                            mode = data["mode"]
                        else:
                            mode = 'java'
                        if 'framework' in data:
                            framework = data["framework"]
                        else:
                            framework = 'testrunner'
                        # if there's an additional pool, get the number
                        # of additional servers needed from the ini
                        addPoolServerCount = getNumberOfAddpoolServers(
                            data['config'], options.addPoolId)

                        # Queue the normalized test descriptor; consumed by
                        # the dispatch loop further down.
                        testsToLaunch.append({
                            'component':
                            data['component'],
                            'subcomponent':
                            data['subcomponent'],
                            'confFile':
                            data['confFile'],
                            'iniFile':
                            data['config'],
                            'serverCount':
                            getNumberOfServers(data['config']),
                            'ssh_username':
                            get_ssh_username(data['config']),
                            'ssh_password':
                            get_ssh_password(data['config']),
                            'addPoolServerCount':
                            addPoolServerCount,
                            'timeLimit':
                            data['timeOut'],
                            'parameters':
                            data['parameters'],
                            'initNodes':
                            initNodes,
                            'installParameters':
                            installParameters,
                            'slave':
                            slave,
                            'owner':
                            owner,
                            'mailing_list':
                            mailing_list,
                            'mode':
                            mode
                        })
                else:
                    # record exists but is gated to a newer release
                    print((data['component'], data['subcomponent'],
                           ' is not supported in this release'))
            else:
                # record's OS filter excluded this dispatch run
                print(('OS does not apply to', data['component'],
                       data['subcomponent']))

        # A malformed record must not abort the whole scan: log the traceback
        # and the offending record, then continue with the next one.
        # NOTE(review): 'e' is unused (the traceback is printed instead), and
        # 'data' may be stale/unset if the failure happened before assignment.
        except Exception as e:
            print('exception in querying tests, possible bad record')
            print((traceback.format_exc()))
            print(data)

    # Summarize what was queued before entering the dispatch loop.
    print('tests to launch:')
    for i in testsToLaunch:
        print((i['component'], i['subcomponent']))
    print('\n\n')

    # Base URL of the Jenkins executor job that will actually run each test.
    launchStringBase = str(options.jenkins_server_url) + '/job/' + str(
        options.launch_job)

    # optional add [-docker] [-Jenkins extension]
    if options.serverType.lower() == 'docker':
        launchStringBase = launchStringBase + '-docker'
    if options.test:
        launchStringBase = launchStringBase + '-test'
    #     if options.framework.lower() == "jython":
    # NOTE(review): 'framework' here is whatever value was set while parsing
    # the *last* test record in the loop above (or it may be unbound if the
    # loop queued nothing) — per-test frameworks are not honored here. The
    # commented-out line suggests this used to read options.framework; confirm
    # intended behavior before relying on the '-jython'/'-TAF' suffixing.
    if framework == "jython":
        launchStringBase = launchStringBase + '-jython'
    if framework == "TAF":
        launchStringBase = launchStringBase + '-TAF'
    elif options.jenkins is not None:
        launchStringBase = launchStringBase + '-' + options.jenkins

    # this are VM/Docker dependent - or maybe not
    # Template with {0}..{15} placeholders; filled per test via .format()
    # in the dispatch loop below (buildWithParameters remote trigger).
    launchString = launchStringBase + '/buildWithParameters?token=test_dispatcher&' + \
                        'version_number={0}&confFile={1}&descriptor={2}&component={3}&subcomponent={4}&' + \
                         'iniFile={5}&parameters={6}&os={7}&initNodes={' \
                         '8}&installParameters={9}&branch={10}&slave={' \
                         '11}&owners={12}&mailing_list={13}&mode={14}&timeout={15}'
    if options.rerun_params:
        rerun_params = options.rerun_params.strip('\'')
        launchString = launchString + '&' + urllib.parse.urlencode(
            {"rerun_params": rerun_params})
    launchString = launchString + '&retries=' + options.retries
    if options.include_tests:
        launchString = launchString + '&include_tests=' + urllib.parse.quote(
            options.include_tests.replace("'", " ").strip())
    launchString = launchString + '&fresh_run=' + urllib.parse.quote(
        str(options.fresh_run).lower())
    if options.url is not None:
        launchString = launchString + '&url=' + options.url
    if options.cherrypick is not None:
        launchString = launchString + '&cherrypick=' + urllib.parse.quote(
            options.cherrypick)
    # Propagate this dispatcher job's own parameters to the executor so the
    # run can be traced back / re-dispatched later.
    currentDispatcherJobUrl = OS.getenv("BUILD_URL")
    currentExecutorParams = get_jenkins_params.get_params(
        currentDispatcherJobUrl)
    currentExecutorParams['dispatcher_url'] = OS.getenv('JOB_URL')
    currentExecutorParams = json.dumps(currentExecutorParams)

    while len(testsToLaunch) > 0:
        try:
            # this bit is Docker/VM dependent
            serverCount = get_available_servers_count(options=options,
                                                      os_version=options.os)
            if serverCount and serverCount > 0:
                # see if we can match a test
                print((time.asctime(time.localtime(time.time())), 'there are',
                       serverCount, ' servers available'))

                haveTestToLaunch = False
                i = 0
                while not haveTestToLaunch and i < len(testsToLaunch):
                    if testsToLaunch[i]['serverCount'] <= serverCount:
                        if testsToLaunch[i]['addPoolServerCount']:
                            addlServersCount = get_available_servers_count(
                                options=options,
                                is_addl_pool=True,
                                os_version=addPoolServer_os)
                            if addlServersCount == 0:
                                print(
                                    time.asctime(time.localtime(time.time())),
                                    'no {0} VMs at this time'.format(
                                        options.addPoolId))
                                i = i + 1
                            else:
                                print(
                                    time.asctime(time.localtime(time.time())),
                                    "there are {0} {1} servers available".
                                    format(addlServersCount,
                                           options.addPoolId))
                                haveTestToLaunch = True
                        else:
                            haveTestToLaunch = True
                    else:
                        i = i + 1

                if haveTestToLaunch:
                    # build the dashboard descriptor
                    dashboardDescriptor = urllib.parse.quote(
                        testsToLaunch[i]['subcomponent'])
                    if options.dashboardReportedParameters is not None:
                        for o in options.dashboardReportedParameters.split(
                                ','):
                            dashboardDescriptor += '_' + o.split('=')[1]

                    # and this is the Jenkins descriptor
                    descriptor = urllib.parse.quote(
                        testsToLaunch[i]['component'] + '-' +
                        testsToLaunch[i]['subcomponent'] + '-' +
                        time.strftime('%b-%d-%X') + '-' + options.version)

                    # grab the server resources
                    # this bit is Docker/VM dependent
                    servers = []
                    unreachable_servers = []
                    how_many = testsToLaunch[i]['serverCount'] - len(servers)
                    while how_many > 0:
                        unchecked_servers = get_servers(options=options,
                                                        descriptor=descriptor,
                                                        test=testsToLaunch[i],
                                                        how_many=how_many,
                                                        os_version=options.os)
                        if options.check_vm == "True":
                            checked_servers, bad_servers = check_servers_via_ssh(
                                servers=unchecked_servers,
                                test=testsToLaunch[i])
                            for ss in checked_servers:
                                servers.append(ss)
                            for ss in bad_servers:
                                unreachable_servers.append(ss)
                        else:
                            for ss in unchecked_servers:
                                servers.append(ss)
                        how_many = testsToLaunch[i]['serverCount'] - len(
                            servers)

                    if options.serverType.lower() != 'docker':
                        # sometimes there could be a race, before a dispatcher process acquires vms,
                        # another waiting dispatcher process could grab them, resulting in lesser vms
                        # for the second dispatcher process
                        if len(servers) != testsToLaunch[i]['serverCount']:
                            continue

                    # get additional pool servers as needed
                    addl_servers = []
                    if testsToLaunch[i]['addPoolServerCount']:
                        how_many_addl = testsToLaunch[i][
                            'addPoolServerCount'] - len(addl_servers)
                        while how_many_addl > 0:
                            unchecked_servers = get_servers(
                                options=options,
                                descriptor=descriptor,
                                test=testsToLaunch[i],
                                how_many=how_many_addl,
                                is_addl_pool=True,
                                os_version=addPoolServer_os)
                            for ss in unchecked_servers:
                                addl_servers.append(ss)
                            how_many_addl = testsToLaunch[i][
                                'addPoolServerCount'] - len(addl_servers)

                    # and send the request to the test executor

                    # figure out the parameters, there are test suite specific, and added at dispatch time
                    if runTimeTestRunnerParameters is None:
                        parameters = testsToLaunch[i]['parameters']
                    else:
                        if testsToLaunch[i]['parameters'] == 'None':
                            parameters = runTimeTestRunnerParameters
                        else:
                            parameters = testsToLaunch[i][
                                'parameters'] + ',' + runTimeTestRunnerParameters

                    url = launchString.format(
                        options.version, testsToLaunch[i]['confFile'],
                        descriptor, testsToLaunch[i]['component'],
                        dashboardDescriptor, testsToLaunch[i]['iniFile'],
                        urllib.parse.quote(parameters), options.os,
                        testsToLaunch[i]['initNodes'],
                        testsToLaunch[i]['installParameters'], options.branch,
                        testsToLaunch[i]['slave'],
                        urllib.parse.quote(testsToLaunch[i]['owner']),
                        urllib.parse.quote(testsToLaunch[i]['mailing_list']),
                        testsToLaunch[i]['mode'],
                        testsToLaunch[i]['timeLimit'])
                    url = url + '&dispatcher_params=' + \
                                   urllib.parse.urlencode({"parameters":
                                                 currentExecutorParams})

                    if options.serverType.lower() != 'docker':
                        servers_str = json.dumps(servers).replace(
                            ' ', '').replace('[', '', 1)
                        servers_str = rreplace(servers_str, ']', 1)
                        url = url + '&servers=' + urllib.parse.quote(
                            servers_str)

                        if testsToLaunch[i]['addPoolServerCount']:
                            addPoolServers = json.dumps(addl_servers).replace(
                                ' ', '').replace('[', '', 1)
                            addPoolServers = rreplace(addPoolServers, ']', 1)
                            url = url + '&addPoolServerId=' +\
                                    options.addPoolId +\
                                    '&addPoolServers=' +\
                                    urllib.parse.quote(addPoolServers)

                    if len(unreachable_servers) > 0:
                        print(
                            "The following VM(s) are unreachable for ssh connection:"
                        )
                        for s in unreachable_servers:
                            response, content = httplib2.Http(
                                timeout=TIMEOUT).request(
                                    'http://' + SERVER_MANAGER +
                                    '/releaseip/' + s + '/ssh_failed', 'GET')
                            print(s)

                    print('\n', time.asctime(time.localtime(time.time())),
                          'launching ', url)
                    print(url)
                    dispatch_job = True
                    if not options.fresh_run:
                        dispatch_job = \
                            find_rerun_job.should_dispatch_job(
                                options.os, testsToLaunch[i][
                                    'component'], testsToLaunch[i][
                                    'subcomponent'], options.version)

                    if options.noLaunch or not dispatch_job:
                        # free the VMs
                        time.sleep(3)
                        if options.serverType.lower() == 'docker':
                            pass  # figure docker out later
                        else:
                            response, content = httplib2.Http(timeout=TIMEOUT).\
                                request('http://' + SERVER_MANAGER + '/releaseservers/' + descriptor + '/available', 'GET')
                            print('the release response', response, content)
                    else:
                        response, content = httplib2.Http(
                            timeout=TIMEOUT).request(url, 'GET')

                    testsToLaunch.pop(i)
                    summary.append({
                        'test':
                        descriptor,
                        'time':
                        time.asctime(time.localtime(time.time()))
                    })
                    if options.noLaunch:
                        pass  # no sleeping necessary
                    elif options.serverType.lower() == 'docker':
                        time.sleep(
                            240
                        )  # this is due to the docker port allocation race
                    else:
                        time.sleep(30)

                else:
                    print('not enough servers at this time')
                    time.sleep(POLL_INTERVAL)
            # endif checking for servers

        except Exception as e:
            print('have an exception')
            print((traceback.format_exc()))
            time.sleep(POLL_INTERVAL)
    # endwhile

    print('\n\n\ndone, everything is launched')
    for i in summary:
        print((i['test'], 'was launched at', i['time']))
    return