Example #1
 def test_tracing_threshold_analytics(self):
     timeout = timedelta(seconds=0.3)
     opt = ClusterTracingOptions(tracing_threshold_analytics=timeout)
     opts = self._create_cluster_opts(tracing_options=opt)
     cluster = Cluster.connect(self.cluster.connstr, opts,
                               **self._mock_hack())
     self.assertEqual(timeout, cluster.tracing_threshold_analytics)
Example #2
 def test_query_default_timeout(self):
     timeout = timedelta(seconds=50)
     opts = self._create_cluster_opts(timeout_options=ClusterTimeoutOptions(
         query_timeout=timeout))
     cluster = Cluster.connect(self.cluster.connstr, opts,
                               **self._mock_hack())
     self.assertEqual(timeout, cluster.query_timeout)
 def test_can_do_admin_calls_after_unsuccessful_bucket_openings(self):
     if self.is_mock:
         raise SkipTest("mock doesn't support admin calls")
     cluster = Cluster.connect(self.cluster.connstr, self._create_cluster_opts(), **self._mock_hack())
     self.assertRaises(BucketNotFoundException, cluster.bucket, "flkkjkjk")
     self.assertIsNotNone(cluster.bucket(self.bucket_name))
     self.assertIsNotNone(cluster.query_indexes().get_all_indexes(self.bucket_name))
Example #4
    def test_async(self):
      print("blockingtoasync")
      #tag::blockingtoasync[]
      cluster = Cluster.connect("couchbase://localhost", ClusterOptions(PasswordAuthenticator("username", "password")))
      bucket = cluster.bucket("travel-sample")

      # Same API as Bucket, but completely async with asyncio Futures
      from acouchbase.bucket import Bucket
      async_bucket = Bucket("couchbase://localhost/default")

      cluster.disconnect()
      #end::blockingtoasync[]

      print("reactivecluster")
      #tag::reactivecluster[]
      from acouchbase.bucket import Bucket
      cluster = Cluster("couchbase://localhost", ClusterOptions(PasswordAuthenticator("username", "password")),bucket_class=Bucket)
      bucket = cluster.bucket("travel-sample")

      # A reactive cluster's disconnect method returns a Mono<Void>.
      # Nothing actually happens until you subscribe to the Mono.
      # The simplest way to subscribe is to await completion by calling `block()`.
      cluster.disconnect()
      #end::reactivecluster[]

      print("asynccluster")
      #tag::asynccluster[]
      cluster = Cluster.connect("couchbase://localhost", ClusterOptions(PasswordAuthenticator("username", "password")))
      bucket = cluster.bucket("travel-sample")

      # An async cluster's disconnect method returns a CompletableFuture<Void>.
      # The disconnection starts as soon as you call disconnect().
      # The simplest way to wait for the disconnect to complete is to call `join()`.
      cluster.disconnect().join()
      #end::asynccluster[]

      print("tls")
      #tag::tls[]
      cluster = Cluster("couchbases://localhost",ClusterOptions(PasswordAuthenticator("username","password",cert_path="/path/to/cluster.crt")))
      #end::tls[]

      print("dnssrv")
      #tag::dnssrv[]
      # Java-style pseudocode carried over from the original sample; the Python SDK
      # has no ClusterEnvironment/IoConfig builder:
      # env = ClusterEnvironment.builder() \
      #     .ioConfig(IoConfig.enableDnsSrv(true)) \
      #     .build()
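      # A hedged Python sketch instead (assumption: with the Python SDK, DNS SRV
      # lookup is driven by the connection string, so a single DNS name backed by
      # an SRV record is enough; "cb.example.com" is a hypothetical hostname):
      # cluster = Cluster.connect(
      #     "couchbases://cb.example.com",
      #     ClusterOptions(PasswordAuthenticator("username", "password")))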
def get_cluster():
    opts = ClusterOptions(
        authenticator=PasswordAuthenticator("Administrator", "password"),
        transaction_config=TransactionConfig(
            durability=ServerDurability(DurabilityLevel.PERSIST_TO_MAJORITY)))

    example_cluster = Cluster.connect('couchbase://localhost', opts)
    return example_cluster
Example #6
 def test_can_override_cluster_options(self):
     compression = Compression.FORCE
     compression2 = Compression.IN
     opts = self._create_cluster_opts(compression=compression)
     args = self._mock_hack()
     args.update({'compression': compression2})
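     # keyword arguments passed directly to Cluster.connect() take precedence
     # over the same settings supplied in ClusterOptions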
     cluster = Cluster.connect(self.cluster.connstr, opts, **args)
     self.assertEqual(compression2, cluster.compression)
 def test_tracing_threshold_queue_size(self):
     size = 100
     opt = ClusterTracingOptions(tracing_threshold_queue_size=size)
     opts = self._create_cluster_opts(tracing_options=opt)
     cluster = Cluster.connect(self.cluster.connstr, opts, **self._mock_hack())
     self.assertEqual(size, cluster.tracing_threshold_queue_size)
     b = cluster.bucket(self.bucket_name)
     self.assertEqual(size, b.tracing_threshold_queue_size)
Example #8
 def test_views_default_timeout(self):
     timeout = timedelta(seconds=50)
     opts = self._create_cluster_opts(timeout_options=ClusterTimeoutOptions(
         views_timeout=timeout))
     cluster = Cluster.connect(self.cluster.connstr, opts,
                               **self._mock_hack())
     b = cluster.bucket(self.bucket_name)
     self.assertEqual(timeout, b.views_timeout)
 def test_tracing_threshold_queue_flush_interval(self):
     timeout = timedelta(seconds=10)
     opt = ClusterTracingOptions(tracing_threshold_queue_flush_interval=timeout)
     opts = self._create_cluster_opts(tracing_options=opt)
     cluster = Cluster.connect(self.cluster.connstr, opts, **self._mock_hack())
     self.assertEqual(timeout, cluster.tracing_threshold_queue_flush_interval)
     b = cluster.bucket(self.bucket_name)
     self.assertEqual(timeout, b.tracing_threshold_queue_flush_interval)
 def can_do_admin_calls_after_unsuccessful_bucket_openings(self):
     cluster = Cluster.connect(self.cluster.connstr,
                               self._create_cluster_opts(),
                               **self._mock_hack())
     self.assertRaises(BucketNotFoundException, cluster.bucket, "flkkjkjk")
     self.assertIsNotNone(cluster.bucket(self.bucket_name))
     self.assertIsNotNone(cluster.query_indexes().list_all_indexes(
         self.bucket_name))
 def test_can_override_timeout_options(self):
     timeout = timedelta(seconds=100)
     timeout2 = timedelta(seconds=50)
     opts = self._create_cluster_opts(timeout_options=ClusterTimeoutOptions(kv_timeout=timeout))
     args = self._mock_hack()
     args.update({'timeout_options': ClusterTimeoutOptions(kv_timeout=timeout2)})
     cluster = Cluster.connect(self.cluster.connstr, opts, **args)
     b = cluster.bucket(self.bucket_name)
     self.assertEqual(timeout2, b.kv_timeout)
 def test_can_override_tracing_options(self):
     timeout = timedelta(seconds=50)
     timeout2 = timedelta(seconds=100)
     opts = self._create_cluster_opts(
         tracing_options=ClusterTracingOptions(tracing_orphaned_queue_flush_interval=timeout))
     args = self._mock_hack()
     args.update({'tracing_options': ClusterTracingOptions(tracing_orphaned_queue_flush_interval=timeout2)})
     cluster = Cluster.connect(self.cluster.connstr, opts, **args)
     self.assertEqual(timeout2, cluster.tracing_orphaned_queue_flush_interval)
     b = cluster.bucket(self.bucket_name)
     self.assertEqual(timeout2, b.tracing_orphaned_queue_flush_interval)
 def test_disconnect(self):
     # for this test we need a new cluster...
     if self.is_mock:
         raise SkipTest("query not mocked")
     cluster = Cluster.connect(self.cluster.connstr, ClusterOptions(
         PasswordAuthenticator(self.cluster_info.admin_username, self.cluster_info.admin_password)))
     # Temporarily, let's open a bucket to ensure the admin object was created
     b = cluster.bucket(self.bucket_name)
     # verify that we can get a bucket manager
     self.assertIsNotNone(cluster.buckets())
     # disconnect cluster
     cluster.disconnect()
     self.assertRaises(AlreadyShutdownException, cluster.buckets)
 def test_cluster_may_need_open_bucket_before_admin_calls(self):
     # NOTE: some admin calls -- like listing query indexes -- seem to require
     # that the admin was given a bucket.  That can only happen if we have already
     # opened a bucket, which is what usually happens in the tests.  This test does
     # not, and checks for the exception when appropriate.
     if self.is_mock:
         raise SkipTest("mock doesn't support the admin call we are making")
     cluster = Cluster.connect(self.cluster.connstr, self._create_cluster_opts(), **self._mock_hack())
     if cluster._is_6_5_plus():
         self.assertIsNotNone(cluster.query_indexes().get_all_indexes(self.bucket_name))
     else:
         # since we called cluster._is_6_5_plus(), that creates an admin under the hood to do
         # the http call.  Thus, we won't get the NoBucketException in this case; we get a
         # NotSupportedException instead.  Normally, one would use the public api and not hit
         # that, getting the NoBucketException instead.
         self.assertRaises(NotSupportedException, cluster.query_indexes().get_all_indexes, self.bucket_name)
 def test_cluster_may_need_open_bucket_before_admin_calls(self):
     # NOTE: some admin calls -- like listing query indexes -- seem to require
     # that the admin was given a bucket.  That can only happen if we have already
     # opened a bucket, which is what usually happens in the tests.  This test does
     # not, and checks for the exception when appropriate.
     if self.is_mock:
         raise SkipTest("mock doesn't support the admin call we are making")
     cluster = Cluster.connect(self.cluster.connstr,
                               self._create_cluster_opts(),
                               **self._mock_hack())
     if cluster._is_6_5_plus():
         self.assertIsNotNone(cluster.query_indexes().get_all_indexes(
             self.bucket_name))
     else:
         self.assertRaises(NoBucketException,
                           cluster.query_indexes().list_all_indexes,
                           self.bucket_name)
Example #16
    def test_simpleconnect(self):

      #tag::simpleconnect[]
      cluster = Cluster.connect("127.0.0.1", ClusterOptions(PasswordAuthenticator("username", "password")))
      bucket = cluster.bucket("travel-sample")
      collection = bucket.default_collection()

      # You can access multiple buckets using the same Cluster object.
      another_bucket = cluster.bucket("beer-sample")

      # You can access collections other than the default
      # if your version of Couchbase Server supports this feature.
      customer_a = bucket.scope("customer-a")
      widgets = customer_a.collection("widgets")

      #end::simpleconnect[]

      # For a graceful shutdown, disconnect from the cluster when the program ends.
      cluster.disconnect()
Example #17
    def main(self, args):
        # tag::simple[]
        cluster = Cluster.connect(
            "localhost",
            ClusterOptions(PasswordAuthenticator("Administrator", "password")))
        try:
            result = cluster.analytics_query("select \"hello\" as greeting")

            for row in result.rows():
                print("Found row: {}".format(row))

            print("Reported execution time: {}".format(
                result.metadata().metrics().execution_time()))
        except CouchbaseException as ex:
            import traceback
            traceback.print_exc()
        # end::simple[]

        # tag::named[]
        result = cluster.analytics_query(
            "select count(*) from airports where country = $country",
            country="France")

        # end::named[]

        # tag::positional[]
        result = cluster.analytics_query(
            "select count(*) from airports where country = ?", "France")
        # end::positional[]

        # TODO: uncomment pending https://issues.couchbase.com/browse/PYCBC-976
        # # tag::scanconsistency[]
        # result = cluster.analytics_query(
        #     "select ...",
        #     scan_consistency=AnalyticsScanConsistency.REQUEST_PLUS)
        # # end::scanconsistency[]

        # tag::clientcontextid[]
        import uuid
        result = cluster.analytics_query(
            "select ...",
            AnalyticsOptions(
                client_context_id="user-44{}".format(uuid.uuid4())))

        # end::clientcontextid[]

        # tag::priority[]
        result = cluster.analytics_query("select ...",
                                         AnalyticsOptions(priority=True))
        # end::priority[]

        # tag::readonly[]
        result = cluster.analytics_query("select ...", readonly=True)
        # end::readonly[]

        # TODO: uncomment pending https://issues.couchbase.com/browse/PYCBC-977
        # # tag::printmetrics[]
        # result = cluster.analytics_query("select 1=1")  # type: Union[AnalyticsResult,IterableWrapper]
        # print(
        #     "Execution time: " + result.metadata().metrics().executionTime()
        # )
        # # end::printmetrics[]
        #

        # tag::rowsasobject[]
        result = cluster.analytics_query(
            "select * from `travel-sample` limit 10")
        for row in result.rows():
            print("Found row: " + row)
Example #18
import uuid

from couchbase.mutation_state import MutationState
from couchbase.cluster import QueryScanConsistency
# tag::n1ql_basic_example[]
from couchbase.cluster import Cluster, ClusterOptions, QueryOptions
from couchbase.auth import PasswordAuthenticator
from couchbase.exceptions import CouchbaseException

cluster = Cluster.connect(
    "couchbase://localhost",
    ClusterOptions(PasswordAuthenticator("Administrator", "password")))
bucket = cluster.bucket("travel-sample")
collection = bucket.default_collection()

try:
    result = cluster.query(
        "SELECT * FROM `travel-sample`.inventory.airport LIMIT 10",
        QueryOptions(metrics=True))

    for row in result.rows():
        print("Found row: {}".format(row))

    print("Report execution time: {}".format(
        result.metadata().metrics().execution_time()))

except CouchbaseException as ex:
    import traceback
    traceback.print_exc()

# end::n1ql_basic_example[]
Example #19
 def test_compression_min_ratio(self):
     ratio = 0.5
     opts = self._create_cluster_opts(compression_min_ratio=ratio)
     cluster = Cluster.connect(self.cluster.connstr, opts,
                               **self._mock_hack())
     self.assertEqual(ratio, cluster.compression_min_ratio)
Example #20
 def test_redaction(self):
     opts = self._create_cluster_opts(log_redaction=True)
     cluster = Cluster.connect(self.cluster.connstr, opts,
                               **self._mock_hack())
     self.assertTrue(cluster.redaction)
Example #21
 def test_compression(self):
     compression = Compression.FORCE
     opts = self._create_cluster_opts(compression=compression)
     cluster = Cluster.connect(self.cluster.connstr, opts,
                               **self._mock_hack())
     self.assertEqual(compression, cluster.compression)
Example #22
 def test_compression_min_size(self):
     size = 5000
     opts = self._create_cluster_opts(compression_min_size=size)
     cluster = Cluster.connect(self.cluster.connstr, opts,
                               **self._mock_hack())
     self.assertEqual(size, cluster.compression_min_size)
Example #23
 def test_connectionstringparams(self):
   print("connectionstringparams")
   #tag::connectionstringparams[]
   cluster = Cluster.connect(
       "couchbase://localhost?compression=on&log_redaction=on", ClusterOptions(PasswordAuthenticator("username", "password")))
Example #24
 def test_multinodeconnect(self):
   print("multinodeconnect")
   #tag::multinodeconnect[]
   cluster = Cluster.connect("couchbase://node1.example.com,node2.example.com", ClusterOptions(PasswordAuthenticator("username", "password")))
Example #25
from couchbase.mutation_state import MutationState
from couchbase.cluster import Cluster, ClusterOptions
from couchbase.auth import PasswordAuthenticator

cluster = Cluster.connect("localhost", ClusterOptions(PasswordAuthenticator("user", "pass")))
bucket = cluster.bucket("default")
collection = bucket.default_collection()
#tag::positional[]
result = cluster.query(
    "SELECT x.* FROM `default` x WHERE x.Type=$1",
    'User')
#end::positional[]

#tag::named[]
result = cluster.query(
    "SELECT x.* FROM `default` x WHERE x.Type=$type",
    type='User')
#end::named[]

#tag::iterating[]

result = cluster.query(
    "SELECT x.* FROM `default` x WHERE x.Type=$1",
    'User')

# iterate over rows
for row in result:
    # each row is a dict containing the fields selected by the query
    name = row['username']
    age = row['age']
#end::iterating[]
def main():
    # tag::config[]
    opts = ClusterOptions(
        authenticator=PasswordAuthenticator("Administrator", "password"),
        transaction_config=TransactionConfig(
            durability=ServerDurability(DurabilityLevel.PERSIST_TO_MAJORITY)))

    cluster = Cluster.connect('couchbase://localhost', opts)
    # end::config[]

    test_doc = "foo"

    # tag::ts-bucket[]
    # get a reference to our bucket
    bucket = cluster.bucket("travel-sample")
    # end::ts-bucket[]

    # tag::ts-collection[]
    # get a reference to our collection
    collection = bucket.scope("inventory").collection("airline")
    # end::ts-collection[]

    # tag::ts-default-collection[]
    # get a reference to the default collection, required for older Couchbase server versions
    collection_default = bucket.default_collection()
    # end::ts-default-collection[]

    # Set up for what we'll do below
    remove_or_warn(collection, 'doc-a')
    remove_or_warn(collection, 'doc-b')
    remove_or_warn(collection, 'doc-c')
    remove_or_warn(collection, test_doc)
    remove_or_warn(collection, 'docId')

    # await collection.upsert("doc-a", {})
    collection.upsert('doc-b', {})
    collection.upsert('doc-c', {})
    collection.upsert('doc-id', {})
    collection.upsert('a-doc', {})

    def txn_insert(ctx):
        ctx.insert(collection, test_doc, 'hello')

    try:
        cluster.transactions.run(txn_insert)
    except TransactionFailed as ex:
        print(f'Transaction did not reach commit point.  Error: {ex}')
    except TransactionCommitAmbiguous as ex:
        print(f'Transaction possibly committed.  Error: {ex}')

    # tag::create[]
    def txn_logic_ex(ctx  # type: AttemptContext
                     ):
        """
        … Your transaction logic here …
        """

    try:
        """
        'txn_logic_ex' is a Python closure that takes an AttemptContext. The
        AttemptContext permits getting, inserting, removing and replacing documents,
        performing N1QL queries, etc.

        Committing is implicit at the end of the closure.
        """
        cluster.transactions.run(txn_logic_ex)
    except TransactionFailed as ex:
        print(f'Transaction did not reach commit point.  Error: {ex}')
    except TransactionCommitAmbiguous as ex:
        print(f'Transaction possibly committed.  Error: {ex}')
    # end::create[]

    # tag::examples[]
    inventory = cluster.bucket("travel-sample").scope("inventory")

    def txn_example(ctx):
        # insert doc
        ctx.insert(collection, 'doc-a', {})

        # get a doc
        doc_a = ctx.get(collection, 'doc-a')

        # replace a doc
        doc_b = ctx.get(collection, 'doc-b')
        content = doc_b.content_as[dict]
        content['transactions'] = 'are awesome!'
        ctx.replace(doc_b, content)

        # remove a doc
        doc_c = ctx.get(collection, 'doc-c')
        ctx.remove(doc_c)

        # tag::scope-example[]
        # Added the above tag (scope-example) to ignore this section in the docs for now.
        # Once the below TODO is addressed we can remove the tag completely.
        # N1QL query
        # @TODO:  clean up txns query options, scope, pos args and named args won't work
        # query_str = 'SELECT * FROM hotel WHERE country = $1 LIMIT 2'
        # res = ctx.query(query_str,
        #         TransactionQueryOptions(scope=inventory,
        #                                 positional_args = ['United Kingdom']))
        # end::scope-example[]
        query_str = 'SELECT * FROM `travel-sample`.inventory.hotel WHERE country = "United Kingdom" LIMIT 2;'
        res = ctx.query(query_str)
        rows = [r for r in res.rows()]

        query_str = 'UPDATE `travel-sample`.inventory.route SET airlineid = "airline_137" WHERE airline = "AF"'
        res = ctx.query(query_str)
        rows = [r for r in res.rows()]

    try:
        cluster.transactions.run(txn_example)
    except TransactionFailed as ex:
        print(f'Transaction did not reach commit point.  Error: {ex}')
    except TransactionCommitAmbiguous as ex:
        print(f'Transaction possibly committed.  Error: {ex}')
    # end::examples[]

    # execute other examples
    try:
        print('transaction - get')
        get(cluster, collection, 'doc-a')
        # be sure to use a new key here...
        print('transaction - get w/ read own writes')
        get_read_own_writes(cluster, collection, 'doc-id2',
                            {'some': 'content'})
        print('transaction - replace')
        replace(cluster, collection, 'doc-id')
        print('transaction - remove')
        remove(cluster, collection, 'doc-id')
        print('transaction - insert')
        insert(cluster, collection, 'doc-id', {'some': 'content'})
        print("transaction - query_examples")
        query_examples(cluster)
    except TransactionFailed as ex:
        print(f'Txn did not reach commit point.  Error: {ex}')
    except TransactionCommitAmbiguous as ex:
        print(f'Txn possibly committed.  Error: {ex}')
Example #27
# Cluster.connect("127.0.0.1", "user", "pass");
#
# #This is equivalent to
# ClusterEnvironment env = ClusterEnvironment \
#     .builder() \
#     .ioConfig(IoConfig.maxHttpConnections(23)) \
#     .build();
#naend::sysprops[]

from couchbase.cluster import *
from couchbase_core.cluster import PasswordAuthenticator
from couchbase_v2 import COMPRESS_INOUT
#tag::connstr[]
# Will set the compression type to inout
Cluster.connect(
    "couchbases://127.0.0.1?compression=inout",ClusterOptions(PasswordAuthenticator(
    "user",
    "pass")))

# This is equivalent to
collection.compression = COMPRESS_INOUT
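# (a hedged note, not from the original sample: in SDK 3 option terms this is
#  roughly ClusterOptions(compression=Compression.INOUT), assuming the Compression
#  enum used elsewhere in these examples also has an INOUT member)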
#end::connstr[]

#tag::rbac[]
# add convenience overload when available
Cluster.connect("couchbases://127.0.0.1", ClusterOptions(PasswordAuthenticator("username", "password")))
#end::rbac[]

#tag::rbac-full[]
Cluster.connect(
    "couchbases://127.0.0.1",
    ClusterOptions(PasswordAuthenticator("username", "password")))
def run_quick_test():
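    # Exercises basic KV operations (insert/get/upsert/replace/remove) against a
    # mock Couchbase server; create_mock_server() is assumed to be defined
    # elsewhere in the original module.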
    mock_server = create_mock_server()
    bucket_name = "default"
    port = mock_server.rest_port
    host = "127.0.0.1"
    username = "******"
    pw = "password"

    conn_string = "http://{}:{}".format(host, port)
    opts = ClusterOptions(ClassicAuthenticator(username, pw))
    kwargs = {"bucket": bucket_name}
    cluster = Cluster.connect(connection_string=conn_string,
                              options=opts,
                              **kwargs)
    bucket = cluster.bucket(bucket_name)
    collection = bucket.default_collection()

    key = "test-key"
    doc = {
        "a": "aa",
        "b": 1,
        "c": ["Hello,", "World!"],
        "d": {
            "e": "fgh"
        },
        "what": "insert"
    }

    try:
        collection.remove(key)
    except DocumentNotFoundException:
        pass

    res = collection.insert(key, doc)
    assert res.cas is not None

    res = collection.get(key)
    assert res.content_as[dict] == doc

    raised_exception = False
    try:
        collection.insert(key, doc)
    except DocumentExistsException:
        raised_exception = True

    assert raised_exception is True

    doc["what"] = "upsert"
    res = collection.upsert(key, doc)
    assert res.cas is not None

    res = collection.get(key)
    assert res.content_as[dict] == doc

    doc["what"] = "replace"
    res = collection.replace(key, doc)
    assert res.cas is not None

    res = collection.get(key)
    assert res.content_as[dict] == doc

    res = collection.remove(key)
    assert res.cas is not None

    raised_exception = False
    try:
        collection.remove(key)
    except DocumentNotFoundException:
        raised_exception = True

    assert raised_exception is True
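
# A minimal way to run the sketch above (assuming create_mock_server and the
# exception classes are importable from the original module):
if __name__ == "__main__":
    run_quick_test()
    print("quick test passed")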
Example #29
    def test_multinodeconnect(self):
      #tag::multinodeconnect[]
      cluster = Cluster.connect("couchbase://192.168.56.101,192.168.56.102", ClusterOptions(PasswordAuthenticator("username", "password")))
      #end::multinodeconnect[]

    #
    # def test_customenv(self):
    #   #tag::customenv[]
    #   env = ClusterEnvironment.builder()
    #       .build();
    #   # Customize client settings by calling methods on the builder
    #
    #   # Create a cluster using the environment's custom client settings.
    #   cluster = Cluster.connect("127.0.0.1", ClusterOptions
    #       .clusterOptions("username", "password")
    #       .environment(env))
    #
    #   # Shut down gracefully. Shut down the environment
    #   # after all associated clusters are disconnected.
    #   cluster.disconnect()
    #   env.shutdown()
    #   #end::customenv[]
    #
    #
    # def test_shareclusterenv(self):
    #   #tag::shareclusterenvironment[]
    #   env = ClusterEnvironment.builder()\
    #       .timeoutConfig(TimeoutConfig.kvTimeout(Duration.ofSeconds(5)))\
    #       .build()
    #
    #   clusterA = Cluster.connect(
    #       "clusterA.example.com",
    #       ClusterOptions("username", "password")
    #           .environment(env));
    #
    #   clusterB = Cluster.connect(
    #       "clusterB.example.com",
    #       ClusterOptions("username", "password")
    #           .environment(env));

    # # ...
    #
    #   # For a graceful shutdown, disconnect from the clusters
    #   # AND shut down the custom environment when then program ends.
    #   clusterA.disconnect();
    #   clusterB.disconnect();
    #   env.shutdown();
    #end::shareclusterenvironment[]

#
#     // todo use this example when beta 2 is released.
# //    {
# //      // #tag::seednodes[]
# //      int customKvPort = 12345;
# //      int customManagerPort = 23456
# //      Set<SeedNode> seedNodes = new HashSet<>(Arrays.asList(
# //          SeedNode.create("127.0.0.1",
# //              Optional.of(customKvPort),
# //              Optional.of(customManagerPort))))
# //
#
# //      Cluster cluster = Cluster.connect(seedNodes, "username", "password")
# //      // #end::customconnect[]
# //    }

      #tag::connectionstringparams[]
      cluster = Cluster.connect(
          "couchbases://127.0.0.1?compression=inout&log_redaction=on", ClusterOptions(PasswordAuthenticator("username", "password")))
      #end::connectionstringparams[]

      #tag::blockingtoasync[]
      cluster = Cluster.connect("127.0.0.1", ClusterOptions(PasswordAuthenticator("username", "password")))
      bucket = cluster.bucket("travel-sample")

      # Same API as Bucket, but completely async with asyncio Futures
      from acouchbase.bucket import Bucket
      async_bucket = Bucket("couchbase://127.0.0.1/default")

      cluster.disconnect()
      #end::blockingtoasync[]

      #tag::reactivecluster[]
      from acouchbase.bucket import Bucket
      cluster = Cluster("127.0.0.1", ClusterOptions(PasswordAuthenticator("username", "password")),bucket_class=Bucket)
      bucket = cluster.bucket("travel-sample")

      # A reactive cluster's disconnect method returns a Mono<Void>.
      # Nothing actually happens until you subscribe to the Mono.
      # The simplest way to subscribe is to await completion by calling `block()`.
      cluster.disconnect()
      #end::reactivecluster[]

      #tag::asynccluster[]
      cluster = Cluster.connect("127.0.0.1", ClusterOptions(PasswordAuthenticator("username", "password")))
      bucket = cluster.bucket("travel-sample")

      # An async cluster's disconnect method returns a CompletableFuture<Void>.
      # The disconnection starts as soon as you call disconnect().
      # The simplest way to wait for the disconnect to complete is to call `join()`.
      cluster.disconnect().join()
      #end::asynccluster[]


      #tag::tls[]
      cluster = Cluster("couchbases://127.0.0.1",ClusterOptions(PasswordAuthenticator("username","password",cert_path="/path/to/cluster.crt")))
      #end::tls[]

      #tag::dnssrv[]
      # Java-style pseudocode carried over from the original sample (see the note
      # in the earlier dnssrv block); the Python SDK has no such builder API:
      # env = ClusterEnvironment.builder() \
      #     .ioConfig(IoConfig.enableDnsSrv(true)) \
      #     .build()