def sdk3_to_sdk2_durability(durability, num_replicas):
    """Translate an SDK3 durability level into an SDK2-style ClientDurability.

    "Majority" is computed as int((num_replicas + 1) / 2). Falls through and
    returns None for any durability value not handled below.
    """
    if durability == Durability.NONE:
        return ClientDurability(PersistTo.NONE, ReplicateTo.NONE)

    if durability == Durability.MAJORITY:
        return ClientDurability(
            replicate_to=ReplicateTo(int((num_replicas + 1) / 2)),
            persist_to=PersistTo.NONE)

    if durability == Durability.MAJORITY_AND_PERSIST_TO_ACTIVE:
        return ClientDurability(
            replicate_to=ReplicateTo(int((num_replicas + 1) / 2)),
            persist_to=PersistTo.ONE)

    if durability == Durability.PERSIST_TO_MAJORITY:
        return ClientDurability(
            persist_to=PersistTo(int((num_replicas + 1) / 2 + 1)),
            replicate_to=ReplicateTo.NONE)
async def _mset(self, key, doc, ttl=0, persist_to=0, replicate_to=0,
                durability_level=Durability.NONE):
    """Best-effort upsert of `doc` under `key`.

    If either `persist_to` or `replicate_to` is non-zero, observe-based
    (client) durability is used; otherwise server durability with
    `durability_level`. A non-zero `ttl` is attached as the expiry.
    All failures are logged rather than raised, so callers never see an
    exception from this method.
    """
    try:
        if persist_to != 0 or replicate_to != 0:
            # Client durability takes precedence when explicitly requested.
            durability = ClientDurability(
                replicate_to=Cardinal(replicate_to),
                persist_to=Cardinal(persist_to))
        else:
            durability = ServerDurability(durability_level)
        if ttl != 0:
            # NOTE(review): timedelta(ttl) interprets ttl as *days*; if ttl
            # is meant to be seconds this should be timedelta(seconds=ttl)
            # -- confirm against callers.
            # Renamed local (was `timedelta`) to stop shadowing the
            # datetime.timedelta type.
            expiry = datetime.timedelta(ttl)
            upsert_options = UpsertOptions(expiry=expiry, durability=durability)
        else:
            upsert_options = UpsertOptions(durability=durability)
        await self.cb.upsert(key, doc, options=upsert_options)
    except TemporaryFailException:
        # logging.warn is deprecated (and removed in Python 3.13);
        # use logging.warning.
        logging.warning("temp failure during mset - cluster may be unstable")
    except TimeoutException:
        logging.warning(f"[{self.name}] cluster timed out trying to handle mset")
    except NetworkException as nx:
        logging.error("network error")
        logging.error(nx)
    except Exception as ex:
        # Deliberate catch-all: _mset is best-effort, so log and move on.
        logging.error(ex)
def test_client_durable_insert(self):
    """Insert a fresh key with client (observe-based) durability, then read it back."""
    replica_count = self.bucket._bucket.configured_replica_count
    opts = InsertOptions(durability=ClientDurability(
        persist_to=PersistTo.ONE,
        replicate_to=ReplicateTo(replica_count)))
    self.cb.insert(self.NOKEY, self.CONTENT, opts)
    fetched = self.cb.get(self.NOKEY)
    self.assertEqual(self.CONTENT, fetched.content_as[dict])
def test_mutate_in_durability(self):
    """A replicate_to beyond cluster capacity must raise DurabilityImpossibleException."""
    if self.is_mock:
        raise SkipTest(
            "mock doesn't support getting xattrs (like $document.expiry)")
    specs = (
        SD.upsert("c", "ccc"),
        SD.replace("b", "XXX"),
    )
    # replicate_to=5 exceeds any test cluster's replica count on purpose.
    impossible = MutateInOptions(durability=ClientDurability(replicate_to=5))
    self.assertRaises(
        DurabilityImpossibleException,
        self.coll.mutate_in,
        self.KEY,
        specs,
        impossible)
def test_client_durable_replace(self):
    """Replace an existing key with client durability and verify the new content."""
    replica_count = self.bucket._bucket.configured_replica_count
    new_doc = {"new": "content"}
    opts = ReplaceOptions(durability=ClientDurability(
        persist_to=PersistTo.ONE,
        replicate_to=ReplicateTo(replica_count)))
    self.cb.replace(self.KEY, new_doc, opts)
    fetched = self.cb.get(self.KEY)
    self.assertEqual(new_doc, fetched.content_as[dict])
def test_scenario_C_clientSideDurability(self):
    """
    Scenario C:
    1) Remove a document with Durability Requirements, both variants,
       thinking about error handling
    """
    # Seed a document first. remove is idempotent iff the app guarantees the
    # doc's id won't be reused (e.g. if it's a UUID) -- a reasonable restriction.
    self.coll.upsert("id", "test")
    self.assertEqual(self.coll.get("id").content_as[str], "test")

    deadline = datetime.datetime.now() + timedelta(seconds=30)
    remove_op = lambda replicateTo: self.coll.remove(
        "id",
        RemoveOptions(durability=ClientDurability(replicateTo, PersistTo.ONE)))
    try:
        # Helper wrapper retries the operation in the face of durability failures.
        self.retry_idempotent_remove_client_side(
            remove_op, ReplicateTo.TWO, ReplicateTo.TWO, deadline)
    except NotSupportedException as f:
        raise SkipTest("Using a ClientDurability should work, but it doesn't: {}".format(str(f)))
def test_client_durable_remove(self):
    """Remove a key with client durability; the key must be gone afterwards."""
    replica_count = self.bucket._bucket.configured_replica_count
    opts = RemoveOptions(durability=ClientDurability(
        persist_to=PersistTo.ONE,
        replicate_to=ReplicateTo(replica_count)))
    self.cb.remove(self.KEY, opts)
    self.assertRaises(DocumentNotFoundException, self.cb.get, self.KEY)
# Upsert with Durability (Couchbase Server >= 6.5) level Majority document = dict(foo="bar", bar="foo") opts = UpsertOptions(durability=ServerDurability(Durability.MAJORITY)) result = collection.upsert("document-key", document, opts) # end::durability[] except CouchbaseException as ex: # we expect an exception on local/test host, as Durability requirement # requires appropriately configured cluster pass try: # tag::obs_durability[] # Upsert with observe based durability (Couchbase Server < 6.5) document = {"foo": "bar", "bar": "foo"} opts = UpsertOptions( durability=ClientDurability(ReplicateTo.ONE, PersistTo.ONE)) result = collection.upsert("document-key", document, opts) # end::obs_durability[] except CouchbaseException as ex: # we expect an exception on local/test host, as Durability requirement # requires appropriately configured cluster pass # tag::get[] result = collection.get("document-key") print(result.content_as[dict]) # end::get[] # tag::get_timeout[] opts = GetOptions(timeout=timedelta(seconds=5)) result = collection.get("document-key", opts)
# tag::cas3[] collection.mutate_in( "customer123", [SD.array_append("purchases.complete", 999)], MutateInOptions(cas=1234)) # end::cas3[] except (DocumentExistsException, CASMismatchException) as ex: # we expect an exception here as the CAS value is chosen # for example purposes print(ex) try: # tag::obs_durability[] collection.mutate_in( "key", [SD.insert("username", "dreynholm")], MutateInOptions(durability=ClientDurability( ReplicateTo.ONE, PersistTo.ONE))) # end::obs_durability[] except CouchbaseException as ex: print('Need to have more than 1 node for durability') print(ex) try: # tag::durability[] collection.mutate_in( "customer123", [SD.insert("username", "dreynholm")], MutateInOptions(durability=ServerDurability( Durability.MAJORITY))) # end::durability[] except CouchbaseException as ex: print('Need to have more than 1 node for durability')