def test_xattrs_basic(self):
        cb = self.cb
        k = self.gen_key('xattrs' + str(random()))
        cb.upsert(k, {})

        verbs = ((SD.insert, True), (SD.upsert, True), (SD.replace, False))
        for op, needs_parents in verbs:
            kwargs = dict(create_parents=True) if needs_parents else {}
            # Operate on a single xattr
            rv = cb.mutate_in(
                k, [op('my.attr', 'value', xattr=True, **kwargs)])
            self.assertTrue(rv.success)

            body = cb.get(k)
            self.assertFalse('my' in body.content)
            self.assertFalse('my.attr' in body.content)

            # Try using lookup_in
            rv = cb.lookup_in(k, (SD.get('my.attr'), ))
            self.assertRaises(PathNotFoundException, rv.exists, 0)

            # Finally, use lookup_in with 'xattrs' attribute enabled
            rv = cb.lookup_in(k, (SD.get('my.attr', xattr=True), ))
            self.assertTrue(rv.exists(0))
            self.assertEqual('value', rv.content_as[str](0))
Example #2
 def test_eventing_does_not_use_xattrs(self):
     body = self.create_save_function_body(
         self.function_name,
         HANDLER_CODE.BUCKET_OPS_WITH_TIMERS,
         dcp_stream_boundary="from_now")
     # deploy eventing function
     self.deploy_function(body)
     url = 'couchbase://{ip}/{name}'.format(ip=self.master.ip,
                                            name=self.src_bucket_name)
     bucket = Bucket(url, username="******", password="******")
     for docid in ['customer123', 'customer1234', 'customer12345']:
         bucket.upsert(docid, {'a': 1})
     self.verify_eventing_results(self.function_name,
                                  3,
                                  skip_stats_validation=True)
     # verify that eventing did not leave 'eventing' or '_eventing' xattrs on the documents
     for docid in ['customer123', 'customer1234', 'customer12345']:
         r = bucket.mutate_in(docid, SD.get('eventing', xattr=True))
         log.info(r)
         if "Could not execute one or more multi lookups or mutations" not in str(r):
             self.fail("eventing is still using xattrs for timers")
         r = bucket.mutate_in(docid, SD.get('_eventing', xattr=True))
         log.info(r)
         if "Could not execute one or more multi lookups or mutations" not in str(r):
             self.fail("eventing is still using xattrs for timers")
     self.undeploy_and_delete_function(body)
Example #3
    def test_multi_lookup(self):
        cb = self.cb
        key = self.gen_key('sdmlookup')
        cb.upsert(
            key, {
                'field1': 'value1',
                'field2': 'value2',
                'array': [1, 2, 3],
                'boolean': False
            })

        rvs = cb.lookup_in(key,
                           SD.get('field1'),
                           SD.exists('field2'),
                           SD.exists('field3'),
                           quiet=True)

        self.assertFalse(rvs.success)
        self.assertEqual(3, rvs.result_count)

        self.assertEqual((0, 'value1'), rvs.get(0))
        self.assertEqual((0, 'value1'), rvs.get('field1'))
        self.assertEqual('value1', rvs[0])
        self.assertEqual('value1', rvs['field1'])

        self.assertEqual((0, None), rvs.get(1))
        self.assertEqual((0, None), rvs.get('field2'))
        self.assertEqual(None, rvs[1])
        self.assertEqual(None, rvs['field2'])

        self.assertTrue(rvs.exists('field2'))
        self.assertTrue(rvs.exists(1))
        self.assertTrue(1 in rvs)
        self.assertTrue('field2' in rvs)

        self.assertEqual((E.SubdocPathNotFoundError.CODE, None),
                         rvs.get('field3'))
        self.assertEqual((E.SubdocPathNotFoundError.CODE, None), rvs.get(2))
        self.assertFalse(rvs.exists('field3'))
        self.assertFalse(rvs.exists(2))

        def _getix(rv_, ix):
            return rv_[ix]

        self.assertRaises(E.SubdocPathNotFoundError, _getix, rvs, 2)
        self.assertRaises(E.SubdocPathNotFoundError, _getix, rvs, 'field3')
        self.assertFalse(rvs.exists('field3'))

        # See what happens when we mix operations
        self.assertRaises(E.CouchbaseError, cb.lookup_in, key,
                          SD.get('field1'), SD.insert('a', 'b'))

        # Empty path (invalid)
        self.assertRaises(E.CouchbaseError, cb.lookup_in, key, SD.get(''))
Example #4
    def test_access_ok(self):
        cb = self.cb
        key = self.gen_key('non-exist')
        try:
            cb.lookup_in(key, SD.get('pth1'), quiet=True)
        except E.NotFoundError as e:
            rv = e.all_results[key]
            self.assertFalse(rv.access_ok)

        cb.upsert(key, {'hello': 'world'})
        rv = cb.lookup_in(key, SD.get('nonexist'))
        self.assertTrue(rv.access_ok)
Example #7
    def test_lookup_in_timeout(self):
        self.coll.upsert("id", {'someArray': ['wibble', 'gronk']})
        # wait till it is there
        self.try_n_times(10, 1, self.coll.get, "id")

        # ok, it is there...
        self.coll.get("id", GetOptions(project=["someArray"], timeout=timedelta(seconds=1.0)))
        self.assertRaisesRegex(InvalidArgumentException, "Expected timedelta", self.coll.get, "id",
                               GetOptions(project=["someArray"], timeout=456))
        sdresult_2 = self.coll.lookup_in(
            "id", (SD.get("someArray"),),
            LookupInOptions(timeout=timedelta(microseconds=1)))
        self.assertEqual(['wibble', 'gronk'], sdresult_2.content_as[list](0))
        sdresult_2 = self.coll.lookup_in(
            "id", (SD.get("someArray"),),
            LookupInOptions(timeout=timedelta(seconds=1)),
            timeout=timedelta(microseconds=1))
        self.assertEqual(['wibble', 'gronk'], sdresult_2.content_as[list](0))
Example #8
    def test_scenarioF_subdoc(self):

        item = Scenarios.AddressedUser("fred", 21, "45 Dupydaub Street")
        self.coll.upsert("id", item)
        subdoc = self.coll.get("id", project=("name", "age"))

        user = subdoc.content_as[Scenarios.UserPartial]
        altuser = self.coll.lookup_in("id", (SD.get("name"), SD.get("age")))
        self.assertEqual("fred", altuser.content_as[str](0))
        self.assertEqual(21, altuser.content_as[int](1))
        changed = user.with_attr(age=25)
        self.assertEqual(Scenarios.UserPartial("fred", 25), changed)

        self.coll.mutate_in(subdoc.id, [MutateSpec().upsert("user", changed)])
Example #9
 def test_lookup_in_multiple_specs(self):
     if self.is_mock:
         raise SkipTest("mock doesn't support getting xattrs (like $document.expiry)")
     cas = self.coll.upsert(self.KEY, {"a": "aaa", "b": {"c": {"d": "yo!"}}}).cas
     self.try_n_times(10, 3, self._cas_matches, self.KEY, cas)
     result = self.coll.lookup_in(self.KEY,
                                  (SD.with_expiry(),
                                   SD.get("a"),
                                   SD.exists("b"),
                                   SD.get("b.c")))
     self.assertTrue(result.success)
     self.assertIsNone(result.expiry)
     self.assertEqual("aaa", result.content_as[str](1))
     self.assertTrue(result.exists(2))
     self.assertDictEqual({"d": "yo!"}, result.content_as[dict](3))
def registerPUT(studentId, quarterId, courseNum, offeringId):
    sched_key = studentId + "-" + quarterId
    # verify that offering is real
    try:
        offering = list(
            offering_bucket.lookup_in(quarterId,
                                      subdoc.get(courseNum + '.' +
                                                 offeringId)))[0]
    except (SubdocPathNotFoundError, NotFoundError):
        return log_make_response("Offering does not exist", 400)

    # verify that student is real
    try:
        assert db.users.get_user(studentId)
    except Exception:
        return log_make_response("User {} does not exist".format(studentId),
                                 400)

    # see if student is already enrolled
    try:
        sched = db.schedules.get_user_schedule(studentId, quarterId)
        assert sched is None or courseNum not in sched['offerings']
        print("User is not already enrolled in course")
    except AssertionError:
        return log_make_response("User already enrolled in course",
                                 304)  # type: Response
    except NotFoundError:
        pass

    num_enrolled = offering['enrolled']
    capacity = offering['capacity']
    if num_enrolled >= capacity:
        return log_make_response("Class is full/maximum capacity reached", 403)

    # make user's schedule entry for that quarter if it doesn't exist
    db.schedules.initialize_user_schedule(studentId, quarterId)

    # try:
    # sched_bucket.insert(sched_key, {"studentId": studentId, "quarterId": quarterId})
    # except:
    # pass

    # should throw exception if already enrolled
    db.schedules.add_course_to_sched(studentId, quarterId, courseNum,
                                     offeringId)

    data = {
        'studentId': studentId,
        'quarterId': quarterId,
        'courseNum': courseNum,
        'offeringId': offeringId
    }
    offerings.incr_enrollment_count(quarterId, courseNum, offeringId)
    Neo4JPublisher().create_enrollment(data)
    return log_make_response(
        "Registered {} for {}: {}-{}".format(studentId, quarterId, courseNum,
                                             offeringId), 201)
Example #11
async def sub_doc_operations(collection):
    key = "hotel_10025"
    res = await collection.lookup_in(key, [SD.get("reviews[0].ratings")])

    print("Review ratings: {}".format(res.content_as[dict](0)))

    res = await collection.mutate_in(
        key, [SD.replace("reviews[0].ratings.Rooms", 3.5)])
    print("CAS: {}".format(res.cas))
Example #12
 def get_xattr(self, client_ip, sdk_conn, bucket_name):
     try:
         temp_conn, result = self.connection(client_ip, bucket_name, 'Administrator', 'password')
         self.set_xattr(temp_conn)
         k = 'sdk_1'
         rv = sdk_conn.lookup_in(k, SD.get('my', xattr=True))
         return True
     except Exception as e:
         log.info("Exception is from get_xattr function {0}".format(e))
         return False
Example #14
    def test_fulldoc(self):
        cb = self.cb
        key = self.gen_key('fulldoc')
        cb.mutate_in(key,
                     SD.upsert_fulldoc({'val': True}),
                     SD.upsert('my.xattr',
                               'attrval',
                               create_parents=True,
                               xattr=True),
                     insert_doc=True)
        self.assertEqual(True, cb.retrieve_in(key, 'val')[0])

        self.assertEqual('attrval',
                         cb.lookup_in(key, SD.get('my.xattr', xattr=True))[0])

        rv = cb.lookup_in(key, SD.get('my.xattr'))
        self.assertFalse(rv.exists(0))

        # Get the document back
        rv = cb.lookup_in(key, SD.get_fulldoc())
        self.assertEqual(True, rv[0]['val'])
    def lookup_in(self, doc_id, path):

        try:
            result = self.bucket.lookup_in(doc_id, sd.get(path))
            if result:
                return result[0]
            return None
        except Exception as error:
            print(
                'Error performing KV lookupIn for docId: {0} & path: {1}. Error: {2}'
                .format(doc_id, path, error))
            raise
Example #17
 def test_lookup_in_simple_get_longer_path(self):
     cas = self.coll.upsert(self.KEY, {
         "a": "aaa",
         "b": {
             "c": {
                 "d": "yo!"
             }
         }
     }).cas
     self.try_n_times(10, 3, self._cas_matches, self.KEY, cas)
     result = self.coll.lookup_in(self.KEY, (SD.get("b.c.d"), ))
     self.assertEqual(result.cas, cas)
     self.assertEqual("yo!", result.content_as[str](0))
    def save_address(self, cust_id, path, address):
        '''
            Lab 5:  K/V sub-document operation(s):
              1.  generate key:  customer_<custId>
              2.  get customer addresses
              3.  create business logic to add new address
              4.  update customer address path
              5.  update customer modified date and modifiedBy

            When updating, think about the pros and cons of UPSERT vs.
            REPLACE (see the sketch after this method)
        '''
        try:
            #TODO - figure out why SD.exists returns False although SD.get returns a doc
            key = 'customer_{0}'.format(cust_id)
            result = self.__collection.lookup_in(
                key,
                [
                    #SD.exists(path),
                    SD.get(path)
                ])

            #if not result.content_as[bool](0) or result.content_as[dict](1) is None:
            if result.content_as[dict](0) is None:
                return {'success': False, 'error': None}

            addresses = result.content_as[dict](0)
            addresses[address['name']] = {
                'address': address['address'],
                'city': address['city'],
                'state': address['state'],
                'zipCode': address['zipCode'],
                'country':
                address['country'] if 'country' in address else 'US',
            }
            modified_date = int(datetime.datetime.now().timestamp())

            self.__collection.mutate_in(key, [
                SD.upsert(path, addresses),
                SD.upsert('doc.modified', modified_date),
                SD.upsert('doc.modifiedby', cust_id),
            ])
            return {'success': True, 'error': None}
        except Exception as ex:
            output_message(ex, 'repository.py:save_address() - error:')
            return {
                'success': False,
                'error': {
                    'message': repr(ex),
                    'stackTrace': traceback.format_exc()
                }
            }
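
On the docstring's UPSERT vs. REPLACE question, a hedged sketch (the collection, key, and path names are illustrative, not from the original): SD.replace fails when the path is absent, which surfaces typos, while SD.upsert silently creates the path.

import couchbase.subdocument as SD
from couchbase.exceptions import PathNotFoundException


def update_existing_path(collection, key, path, value):
    # REPLACE asserts the path already exists, so a mistyped path fails
    # loudly instead of creating a stray field; fall back to UPSERT only
    # when first-time creation of the path is actually intended.
    try:
        collection.mutate_in(key, [SD.replace(path, value)])
    except PathNotFoundException:
        collection.mutate_in(key, [SD.upsert(path, value)])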
Example #19
def remove_item(bucket_name, doc_id, item):
    while True:
        cb_bucket = cluster.open_bucket(bucket_name)
        document = cb_bucket.get(doc_id)
        cur_cas = document.cas
        ifEncoded = cb_bucket.lookup_in(doc_id, SD.get('isBase64'))
        ifZiped = cb_bucket.lookup_in(doc_id, SD.get('isZip'))

        if not ifZiped[0] and not ifEncoded[0]:
            #read body
            body = cb_bucket.lookup_in(doc_id, SD.get('body'))[0]
            json_obj = json.loads(body)
            #print(json.dumps(json_obj, indent=2))
            iterate(json_obj, item)
            try:
                cb_bucket.mutate_in(doc_id,
                                    SD.replace('body', json.dumps(json_obj)),
                                    cas=cur_cas)
                break
            except KeyExistsError:
                continue
        else:
            bytes_body = decompress(
                base64.b64decode(
                    cb_bucket.lookup_in(doc_id, SD.get('body'))[0]))
            json_obj = json.loads(bytes_body)
            print(json.dumps(json_obj, indent=2))
            iterate(json_obj, item)
            json_obj = base64.b64encode(
                compress(json.dumps(json_obj).encode('utf-8'))).decode('utf-8')
            try:
                cb_bucket.mutate_in(doc_id,
                                    SD.replace('body',
                                               json.dumps(json_obj)[1:-1]),
                                    cas=cur_cas)
                break
            except KeyExistsError:
                continue
    def test_lookup_in(self):
        res = None
        self.bucket.upsert(
            'king_arthur', {
                'name': 'Arthur',
                'email': '*****@*****.**',
                'interests': ['Holy Grail', 'African Swallows']
            })

        with tracer.start_active_span('test'):
            res = self.bucket.lookup_in('king_arthur', SD.get('email'),
                                        SD.get('interests'))

        assert (res)
        self.assertTrue(res.success)

        spans = self.recorder.queued_spans()
        self.assertEqual(2, len(spans))

        test_span = get_first_span_by_name(spans, 'sdk')
        assert (test_span)
        self.assertEqual(test_span.data["sdk"]["name"], 'test')

        cb_span = get_first_span_by_name(spans, 'couchbase')
        assert (cb_span)

        # Same traceId and parent relationship
        self.assertEqual(test_span.t, cb_span.t)
        self.assertEqual(cb_span.p, test_span.s)

        assert (cb_span.stack)
        self.assertIsNone(cb_span.ec)

        self.assertEqual(cb_span.data["couchbase"]["hostname"],
                         "%s:8091" % testenv['couchdb_host'])
        self.assertEqual(cb_span.data["couchbase"]["bucket"], 'travel-sample')
        self.assertEqual(cb_span.data["couchbase"]["type"], 'lookup_in')
Example #22
 def get_string_and_verify_return(self, client, key = '', path = '', expected_value = None, xattr=None):
     new_path = self.generate_path(self.nesting_level, path)
     try:
         if self.is_sdk_client:
             if xattr:
                 data = self._fix_unicode(client.cb.lookup_in(key, SD.get(path, xattr=xattr))[path])
             else:
                 data = self._fix_unicode(client.cb.retrieve_in(key, new_path).get(0)[1])
         else:
             opaque, cas, data = client.get_sd(key, new_path)
             data = yaml.safe_load(data)
     except Exception as e:
         self.log.error(e)
         msg = "Unable to get key {0} for path {1} after {2} tries".format(key, path, 1)
         return False, msg
     return (str(data) == str(expected_value)), str(data).encode('utf-8')
Example #23
 def get_and_verify_with_value(self, client, key = '', path = '', expected_value = None, xattr=None):
     new_path = self.generate_path(self.nesting_level, path)
     try:
         if self.is_sdk_client:
             if xattr:
                 data = self._fix_unicode(client.cb.lookup_in(key, SD.get(path, xattr=xattr))[path])
             else:
                 data = client.cb.retrieve_in(key, new_path).get(0)[1]
         else:
             opaque, cas, data = client.get_sd(key, new_path)
             data = json.loads(data)
     except Exception as e:
         self.log.error(e)
         msg = "Unable to get key {0} for path {1} after {2} tries".format(key, path, 1)
         return False, msg
     msg = "expected {0}, actual {1}".format(str(data), expected_value)
     return str(data) == expected_value, msg
Example #25
    def test_xattrs_basic(self):
        cb = self.cb
        k = self.gen_key('xattrs')
        cb.upsert(k, {})

        # Try to upsert a single xattr
        rv = cb.mutate_in(
            k, SD.upsert('my.attr', 'value', xattr=True, create_parents=True))
        self.assertTrue(rv.success)

        body = cb.get(k)
        self.assertFalse('my' in body.value)
        self.assertFalse('my.attr' in body.value)

        # Try using lookup_in
        rv = cb.retrieve_in(k, 'my.attr')
        self.assertFalse(rv.exists('my.attr'))

        # Finally, use lookup_in with 'xattrs' attribute enabled
        rv = cb.lookup_in(k, SD.get('my.attr', xattr=True))
        self.assertTrue(rv.exists('my.attr'))
        self.assertEqual('value', rv['my.attr'])
Example #27
    def verify_results(self, skip_verify_data=[], skip_verify_revid=[], sg_run=False):
        """Verify data between each couchbase and remote clusters.
        Run below steps for each source and destination cluster..
            1. Run expiry pager.
            2. Wait for disk queue size to 0 on each nodes.
            3. Wait for Outbound mutations to 0.
            4. Wait for Items counts equal to kv_store size of buckets.
            5. Verify items value on each bucket.
            6. Verify Revision id of each item.
        """
        skip_key_validation = self._input.param("skip_key_validation", False)
        self.__merge_all_buckets()
        for cb_cluster in self.get_cb_clusters():
            for remote_cluster_ref in cb_cluster.get_remote_clusters():
                try:
                    src_cluster = remote_cluster_ref.get_src_cluster()
                    dest_cluster = remote_cluster_ref.get_dest_cluster()

                    if self._evict_with_compactor:
                        for b in src_cluster.get_buckets():
                            # only need to do compaction on the source cluster, evictions are propagated to the remote
                            # cluster
                            src_cluster.get_cluster().compact_bucket(src_cluster.get_master_node(), b)

                    else:
                        src_cluster.run_expiry_pager()
                        dest_cluster.run_expiry_pager()

                    src_cluster.wait_for_flusher_empty()
                    dest_cluster.wait_for_flusher_empty()

                    src_dcp_queue_drained = src_cluster.wait_for_dcp_queue_drain()
                    dest_dcp_queue_drained = dest_cluster.wait_for_dcp_queue_drain()

                    src_cluster.wait_for_outbound_mutations()
                    dest_cluster.wait_for_outbound_mutations()
                except Exception as e:
                    # just log any exception thrown, do not fail test
                    self.log.error(e)
                if not skip_key_validation:
                    try:
                        if not sg_run:
                            src_active_passed, src_replica_passed = \
                                src_cluster.verify_items_count(timeout=self._item_count_timeout)
                            dest_active_passed, dest_replica_passed = \
                                dest_cluster.verify_items_count(timeout=self._item_count_timeout)

                        src_cluster.verify_data(max_verify=self._max_verify, skip=skip_verify_data,
                                                only_store_hash=self.only_store_hash)
                        dest_cluster.verify_data(max_verify=self._max_verify, skip=skip_verify_data,
                                                 only_store_hash=self.only_store_hash)
                        for _, cluster in enumerate(self.get_cb_clusters()):
                            for bucket in cluster.get_buckets():
                                h = httplib2.Http(".cache")
                                resp, content = h.request(
                                    "http://{0}:4984/db/_all_docs".format(cluster.get_master_node().ip))
                                self.assertEqual(json.loads(content)['total_rows'], self._num_items)
                                client = SDKClient(scheme="couchbase", hosts=[cluster.get_master_node().ip],
                                                        bucket=bucket.name).cb
                                for i in range(self._num_items):
                                    key = 'k_%s_%s' % (i, str(cluster).replace(' ', '_').
                                                      replace('.', '_').replace(',', '_').replace(':', '_'))
                                    res = client.get(key)
                                    for xk, xv in res.value.items():
                                        rv = client.lookup_in(key, SD.get(xk, xattr=True))
                                        self.assertTrue(rv.exists(xk))
                                        self.assertEqual(xv, rv[xk])
                                    if sg_run:
                                        resp, content = h.request("http://{0}:4984/db/{1}".format(cluster.get_master_node().ip, key))
                                        self.assertEqual(json.loads(content)['_id'], key)
                                        self.assertEqual(json.loads(content)[xk], xv)
                                        self.assertTrue('2-' in json.loads(content)['_rev'])
                    except Exception as e:
                        self.log.error(e)
                    finally:
                        if not sg_run:
                            rev_err_count = self.verify_rev_ids(remote_cluster_ref.get_replications(),
                                                            skip=skip_verify_revid)
                            # we're done with the test, now report specific errors
                            if (not (src_active_passed and dest_active_passed)) and \
                                    (not (src_dcp_queue_drained and dest_dcp_queue_drained)):
                                self.fail("Incomplete replication: Keys stuck in dcp queue")
                            if not (src_active_passed and dest_active_passed):
                                self.fail("Incomplete replication: Active key count is incorrect")
                            if not (src_replica_passed and dest_replica_passed):
                                self.fail("Incomplete intra-cluster replication: "
                                          "replica count did not match active count")
                            if rev_err_count > 0:
                                self.fail("RevID verification failed for remote-cluster: {0}".
                                          format(remote_cluster_ref))

        # treat errors in self.__report_error_list as failures
        if len(self.get_report_error_list()) > 0:
            error_logger = self.check_errors_in_goxdcr_logs()
            if error_logger:
                self.fail("Errors found in logs : {0}".format(error_logger))
Example #28
 def test_lookup_in_simple_get(self):
     cas = self.coll.upsert(self.KEY, {"a": "aaa", "b": [1, 2, 3, 4]}).cas
     self.try_n_times(10, 3, self._cas_matches, self.KEY, cas)
     result = self.coll.lookup_in(self.KEY, (SD.get("b"), ))
     self.assertEqual(result.cas, cas)
     self.assertEqual([1, 2, 3, 4], result.content_as[list](0))
def __offering_lookup_helper(qId, path):
    ob_data = offering_bucket.lookup_in(qId,
                                        subdoc.get(path))  # type: SubdocResult
    return ob_data[0]
Example #30
    def verify_results(self,
                       skip_verify_data=[],
                       skip_verify_revid=[],
                       sg_run=False):
        """Verify data between each couchbase and remote clusters.
        Run below steps for each source and destination cluster..
            1. Run expiry pager.
            2. Wait for disk queue size to 0 on each nodes.
            3. Wait for Outbound mutations to 0.
            4. Wait for Items counts equal to kv_store size of buckets.
            5. Verify items value on each bucket.
            6. Verify Revision id of each item.
        """
        skip_key_validation = self._input.param("skip_key_validation", False)
        self.__merge_all_buckets()
        for cb_cluster in self.get_cb_clusters():
            for remote_cluster_ref in cb_cluster.get_remote_clusters():
                try:
                    src_cluster = remote_cluster_ref.get_src_cluster()
                    dest_cluster = remote_cluster_ref.get_dest_cluster()

                    if self._evict_with_compactor:
                        for b in src_cluster.get_buckets():
                            # only need to do compaction on the source cluster, evictions are propagated to the remote
                            # cluster
                            src_cluster.get_cluster().compact_bucket(
                                src_cluster.get_master_node(), b)

                    else:
                        src_cluster.run_expiry_pager()
                        dest_cluster.run_expiry_pager()

                    src_cluster.wait_for_flusher_empty()
                    dest_cluster.wait_for_flusher_empty()

                    src_dcp_queue_drained = src_cluster.wait_for_dcp_queue_drain(
                    )
                    dest_dcp_queue_drained = dest_cluster.wait_for_dcp_queue_drain(
                    )

                    src_cluster.wait_for_outbound_mutations()
                    dest_cluster.wait_for_outbound_mutations()
                except Exception as e:
                    # just log any exception thrown, do not fail test
                    self.log.error(e)
                if not skip_key_validation:
                    try:
                        if not sg_run:
                            src_active_passed, src_replica_passed = \
                                src_cluster.verify_items_count(timeout=self._item_count_timeout)
                            dest_active_passed, dest_replica_passed = \
                                dest_cluster.verify_items_count(timeout=self._item_count_timeout)

                        src_cluster.verify_data(
                            max_verify=self._max_verify,
                            skip=skip_verify_data,
                            only_store_hash=self.only_store_hash)
                        dest_cluster.verify_data(
                            max_verify=self._max_verify,
                            skip=skip_verify_data,
                            only_store_hash=self.only_store_hash)
                        for _, cluster in enumerate(self.get_cb_clusters()):
                            for bucket in cluster.get_buckets():
                                h = httplib2.Http(".cache")
                                resp, content = h.request(
                                    "http://{0}:4984/db/_all_docs".format(
                                        cluster.get_master_node().ip))
                                self.assertEqual(
                                    json.loads(content)['total_rows'],
                                    self._num_items)
                                client = SDKClient(
                                    scheme="couchbase",
                                    hosts=[cluster.get_master_node().ip],
                                    bucket=bucket.name).cb
                                for i in range(self._num_items):
                                    key = 'k_%s_%s' % (i, str(cluster).replace(
                                        ' ', '_').replace('.', '_').replace(
                                            ',', '_').replace(':', '_'))
                                    res = client.get(key)
                                    for xk, xv in res.value.items():
                                        rv = client.lookup_in(
                                            key, SD.get(xk, xattr=True))
                                        self.assertTrue(rv.exists(xk))
                                        self.assertEqual(xv, rv[xk])
                                    if sg_run:
                                        resp, content = h.request(
                                            "http://{0}:4984/db/{1}".format(
                                                cluster.get_master_node().ip,
                                                key))
                                        self.assertEqual(
                                            json.loads(content)['_id'], key)
                                        self.assertEqual(
                                            json.loads(content)[xk], xv)
                                        self.assertTrue('2-' in json.loads(
                                            content)['_rev'])
                    except Exception as e:
                        self.log.error(e)
                    finally:
                        if not sg_run:
                            rev_err_count = self.verify_rev_ids(
                                remote_cluster_ref.get_replications(),
                                skip=skip_verify_revid)
                            # we're done with the test, now report specific errors
                            if (not (src_active_passed and dest_active_passed)) and \
                                    (not (src_dcp_queue_drained and dest_dcp_queue_drained)):
                                self.fail(
                                    "Incomplete replication: Keys stuck in dcp queue"
                                )
                            if not (src_active_passed and dest_active_passed):
                                self.fail(
                                    "Incomplete replication: Active key count is incorrect"
                                )
                            if not (src_replica_passed
                                    and dest_replica_passed):
                                self.fail(
                                    "Incomplete intra-cluster replication: "
                                    "replica count did not match active count")
                            if rev_err_count > 0:
                                self.fail(
                                    "RevID verification failed for remote-cluster: {0}"
                                    .format(remote_cluster_ref))

        # treat errors in self.__report_error_list as failures
        if len(self.get_report_error_list()) > 0:
            error_logger = self.check_errors_in_goxdcr_logs()
            if error_logger:
                self.fail("Errors found in logs : {0}".format(error_logger))
Example #31
 def read(self, key: str, field: str):
     self.client.lookup_in(key, subdocument.get(path=field))
Example #32
 def read_xattr(self, key: str, field: str):
     self.client.lookup_in(key, subdocument.get(path=field, xattr=True))
Example #33
 def read(self, key, subdoc_fields):
     for field in subdoc_fields.split(','):
         self.client.lookup_in(key, SD.get(field))
Example #35
 def sd_get(self, key, path):
     return self._cb.lookup_in(key, SD.get(path))
# (the start of this snippet was lost in extraction; json_doc is
#  reconstructed from the lookups below, which expect the
#  addresses.delivery.country and purchases.* paths on "customer123")
json_doc = {
    "name": "Douglas Reynholm",
    "email": "douglas@reynholmindustries.com",
    "addresses": {
        "delivery": {
            "country": "United Kingdom"
        }
    },
    "purchases": {
        "complete": [
            339, 976, 442, 666
        ],
        "abandoned": [
            157, 42, 999
        ]
    }
}

try:
    collection.insert("customer123", json_doc)
except DocumentExistsException:
    collection.remove("customer123")
    collection.insert("customer123", json_doc)

# tag::lookup_in[]
result = collection.lookup_in("customer123",
                              [SD.get("addresses.delivery.country")])
country = result.content_as[str](0)  # "United Kingdom"
# end::lookup_in[]
print(country)

# fixed in v. 3.1.0; prior to 3.1, result.exists(index)
#   would throw an exception if the path did not exist
# tag::lookup_in_exists[]
result = collection.lookup_in(
    "customer123", [
        SD.exists("purchases.pending[-1]")])
print('Path exists: {}.'.format(result.exists(0)))
# Path exists: False.
# end::lookup_in_exists[]
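
Per the note above, on SDKs before 3.1 the exists() call itself could raise, so a defensive sketch like the following was needed (assuming PathNotFoundException is the exception raised; adjust to the SDK version in use):

from couchbase.exceptions import PathNotFoundException

try:
    path_exists = result.exists(0)
except PathNotFoundException:
    # pre-3.1: a missing path raised here instead of returning False
    path_exists = False
print('Path exists: {}.'.format(path_exists))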

# NOTE:  result.content_as[bool](0) would return False
Example #39
def subdocument_get(bucket, path, document_id):
    # don't convert the dict to JSON; the method serializes it automatically
    rv = bucket.lookup_in(document_id, SD.get(path))
    return rv[0]
Example #40
The latter saves even more bandwidth by not retrieving the contents of the path if it is not needed.

.Retrieve sub-document value
[source,python]
----
"""

#tag::content_as[]
import couchbase.collection
import couchbase.subdocument as SD
from couchbase.durability import Durability

collection = couchbase.collection.Collection()

result = collection.lookup_in("customer123",
                              [SD.get("addresses.delivery.country")])
country = result.content_as[str](0)  # "United Kingdom"
#end::content_as[]
"""
----

.Check existence of sub-document path
[source,python]
----
"""
#tag::exists[]
result = collection.lookup_in("customer123",
                              [SD.exists("purchases.pending[-1]")])
print("Path exists? {}".format(result.content_as[bool](0)))
"""
# Path exists? false
import couchbase.subdocument as SD
import couchbase.exceptions as E
from couchbase.bucket import Bucket

cb = Bucket('couchbase://localhost/default')

cb.upsert('docid', {'name': 'Mark', 'email': '*****@*****.**', 'array': [1, 2, 3, 4]})

# Do it the simple way:
rv = cb.retrieve_in('docid', 'name', 'array[1]')
print('Name is: {0}, array[1] is: {1}'.format(rv[0], rv[1]))

# If all results are successful:
name, array_2ndelem = rv
print('Name is: {0}, Array[1] is: {1}'.format(name, array_2ndelem))

# Perform mixed-mode operations
rv = cb.lookup_in('docid', SD.get('name'), SD.get('array[1]'),
                  SD.exists('non-exist'))
print('Name is', rv[0])
print('Array[1] is', rv[1])
print('non-exist exists?', rv.exists(2))

# See what happens when we try to reference a failed path:
try:
    rv[2]
except E.SubdocPathNotFoundError:
    print('Using subscript access raises exception for missing item')

# If we try to get a non-existent document, it will fail as normal
try:
    cb.retrieve_in('non-exist', 'pth1', 'pth2', 'pth3')
except E.NotFoundError:
    print('Document does not exist')