Code example #1
GLOBAL_INSTANCE = None  # module-level cache, empty until first use


def make_instance():
    """Return a shared Bucket when options.global_instance is set,
    otherwise a fresh connection on every call."""
    global GLOBAL_INSTANCE
    if options.global_instance:
        # Lazily create the shared connection on first use.
        if GLOBAL_INSTANCE is None:
            GLOBAL_INSTANCE = Bucket(**CONN_OPTIONS)
        return GLOBAL_INSTANCE
    else:
        return Bucket(**CONN_OPTIONS)
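A minimal usage sketch for the pattern above, assuming it runs in the same module so that `options`, `CONN_OPTIONS`, and `GLOBAL_INSTANCE` are in scope; the stub objects here are hypothetical stand-ins for the real configuration:

# Hypothetical stand-ins for the module's real config and SDK objects.
class _Options:
    global_instance = True

class Bucket:  # stub standing in for the real couchbase Bucket
    def __init__(self, **kwargs):
        self.kwargs = kwargs

options = _Options()
CONN_OPTIONS = {'connection_string': 'couchbase://localhost/default'}

first = make_instance()
second = make_instance()
assert first is second               # shared instance is cached and reused

options.global_instance = False
assert make_instance() is not first  # a fresh Bucket per call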
Code example #2
    async def initialize(self, loop=None):
        from acouchbase.bucket import Bucket

        self._cb = Bucket(os.path.join(self._dsn, self._bucket),
                          username=self._username,
                          password=self._password)
        await self._cb.connect()

        installed_indexes = []
        primary_installed = False
        async for row in self._cb.n1ql_query(
                N1QLQuery('select * from system:indexes')):
            # keyspace_id (not namespace_id) carries the bucket name
            # in system:indexes.
            if row['indexes']['keyspace_id'] != self._bucket:
                continue
            if row['indexes'].get('is_primary'):
                primary_installed = True
            else:
                installed_indexes.append(
                    row['indexes']['index_key'][0].strip('`'))

        if not installed_indexes:
            logger.info('Initializing bucket, can take some time')

        if not primary_installed:
            logger.warning('Creating primary index')
            async for row in self._cb.n1ql_query(  # noqa
                    'CREATE PRIMARY INDEX ON `{bucket}`'.format(
                        bucket=self._bucket)):
                pass

        for field in self._indexes_fields:
            if field in installed_indexes:
                continue
            statement = self._create_statement.format(bucket=self._bucket,
                                                      index_name=field,
                                                      field=field)
            logger.warning('Creating index {}'.format(statement))
            # `statement` is already fully formatted above.
            async for row in self._cb.n1ql_query(statement):  # noqa
                pass

        for field in get_index_fields():
            if 'json.{}'.format(field) in installed_indexes:
                continue
            statement = self._create_statement.format(
                bucket=self._bucket,
                field='json.' + field,
                index_name='json_' + field)
            logger.warning('Creating index {}'.format(statement))
            # `statement` is already fully formatted above.
            async for row in self._cb.n1ql_query(statement):  # noqa
                pass
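This initializer is an excerpt from a storage class (the full class appears in code example #6). A sketch of driving it from an asyncio entry point, with hypothetical connection values:

import asyncio

async def bootstrap():
    # Hypothetical values; constructor arguments mirror the class __init__.
    storage = CouchbaseStorage(dsn='couchbase://localhost',
                               username='username',
                               password='password',
                               bucket='guillotina')
    # Connects and creates the primary and secondary indexes if missing.
    await storage.initialize()

asyncio.get_event_loop().run_until_complete(bootstrap())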
Code example #3
    def test_async(self):
      print("blockingtoasync")
      #tag::blockingtoasync[]
      cluster = Cluster.connect("couchbase://localhost", ClusterOptions(PasswordAuthenticator("username", "password")))
      bucket = cluster.bucket("travel-sample")

      # Same API as Bucket, but completely async with asyncio Futures
      from acouchbase.bucket import Bucket
      async_bucket = Bucket("couchbase://localhost/default")

      cluster.disconnect()
      #end::blockingtoasync[]

      print("reactivecluster")
      #tag::reactivecluster[]
      from acouchbase.bucket import Bucket
      cluster = Cluster("couchbase://localhost", ClusterOptions(PasswordAuthenticator("username", "password")),bucket_class=Bucket)
      bucket = cluster.bucket("travel-sample")

      # Disconnect the cluster once you are finished with it. With the
      # acouchbase-backed cluster the operations are asyncio-based rather
      # than blocking.
      cluster.disconnect()
      #end::reactivecluster[]

      print("asynccluster")
      #tag::asynccluster[]
      cluster = Cluster.connect("couchbase://localhost", ClusterOptions(PasswordAuthenticator("username", "password")))
      bucket = cluster.bucket("travel-sample")

      # Disconnection starts as soon as you call disconnect().
      cluster.disconnect()
      #end::asynccluster[]

      print("tls")
      #tag::tls[]
      cluster = Cluster("couchbases://localhost",ClusterOptions(PasswordAuthenticator("username","password",cert_path="/path/to/cluster.crt")))
      #end::tls[]

      print("dnssrv")
      #tag::dnssrv[]
      # The builder below is a Java SDK construct left over from the
      # original docs and has no direct Python equivalent; in the Python
      # SDK, DNS SRV resolution is driven by the connection string itself.
      # env = ClusterEnvironment.builder() \
      #     .ioConfig(IoConfig.enableDnsSrv(True)) \
      #     .build()
      #end::dnssrv[]
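A self-contained sketch of the acouchbase flow referenced above, assuming a reachable local cluster and that `upsert`/`get` on the async Bucket are awaitable as in the SDK 2.x asyncio API:

import asyncio
from acouchbase.bucket import Bucket

async def demo():
    bucket = Bucket('couchbase://localhost/default',
                    username='username', password='password')
    await bucket.connect()                      # as in the samples above
    await bucket.upsert('greeting', {'msg': 'hello'})
    result = await bucket.get('greeting')
    print(result.value)                         # -> {'msg': 'hello'}

asyncio.get_event_loop().run_until_complete(demo())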
Code example #4
def gen_collection(*args, **kwargs):
    """Create a Bucket and return its default collection."""
    # The original wrapped this in a try/except that only re-raised,
    # which is a no-op; the behavior is identical without it.
    base_bucket = Bucket(*args, **kwargs)
    return base_bucket.default_collection()
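A sketch of calling the wrapper; the connection arguments are hypothetical and simply forwarded to whatever `Bucket` is in scope:

# Hypothetical arguments, forwarded unchanged to Bucket(*args, **kwargs).
collection = gen_collection('couchbase://localhost/travel-sample',
                            username='username', password='password')
collection.upsert('airline_1', {'name': 'Sample Air'})
result = collection.get('airline_1')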
Code example #5
    def test_multinodeconnect(self):
      #tag::multinodeconnect[]
      cluster = Cluster.connect("couchbase://192.168.56.101,192.168.56.102", ClusterOptions(PasswordAuthenticator("username", "password")))
      #end::multinodeconnect[]

    #
    # def test_customenv(self):
    #   #tag::customenv[]
    #   env = ClusterEnvironment.builder()
    #       .build();
    #   # Customize client settings by calling methods on the builder
    #
    #   # Create a cluster using the environment's custom client settings.
    #   cluster = Cluster.connect("127.0.0.1", ClusterOptions
    #       .clusterOptions("username", "password")
    #       .environment(env))
    #
    #   # Shut down gracefully. Shut down the environment
    #   # after all associated clusters are disconnected.
    #   cluster.disconnect()
    #   env.shutdown()
    #   #end::customenv[]
    #
    #
    # def test_shareclusterenv(self):
    #   #tag::shareclusterenvironment[]
    #   env = ClusterEnvironment.builder()\
    #       .timeoutConfig(TimeoutConfig.kvTimeout(Duration.ofSeconds(5)))\
    #       .build()
    #
    #   clusterA = Cluster.connect(
    #       "clusterA.example.com",
    #       ClusterOptions("username", "password")
    #           .environment(env));
    #
    #   clusterB = Cluster.connect(
    #       "clusterB.example.com",
    #       ClusterOptions("username", "password")
    #           .environment(env));

    # # ...
    #
    #   # For a graceful shutdown, disconnect from the clusters
    #   # AND shut down the custom environment when then program ends.
    #   clusterA.disconnect();
    #   clusterB.disconnect();
    #   env.shutdown();
    #end::shareclusterenvironment[]

#
#     // todo use this example when beta 2 is released.
# //    {
# //      // #tag::seednodes[]
# //      int customKvPort = 12345;
# //      int customManagerPort = 23456
# //      Set<SeedNode> seedNodes = new HashSet<>(Arrays.asList(
# //          SeedNode.create("127.0.0.1",
# //              Optional.of(customKvPort),
# //              Optional.of(customManagerPort))))
# //
#
# //      Cluster cluster = Cluster.connect(seedNodes, "username", "password")
# //      // #end::seednodes[]
# //    }

      #tag::connectionstringparams[]
      cluster = Cluster.connect(
          "couchbases://127.0.0.1?compression=inout&log_redaction=on", ClusterOptions(PasswordAuthenticator("username", "password")))
      #end::connectionstringparams[]

      #tag::blockingtoasync[]
      cluster = Cluster.connect("127.0.0.1", ClusterOptions(PasswordAuthenticator("username", "password")))
      bucket = cluster.bucket("travel-sample")

      # Same API as Bucket, but completely async with asyncio Futures
      from acouchbase.bucket import Bucket
      async_bucket = Bucket("couchbase://127.0.0.1/default")

      cluster.disconnect()
      #end::blockingtoasync[]

      #tag::reactivecluster[]
      from acouchbase.bucket import Bucket
      cluster = Cluster("127.0.0.1", ClusterOptions(PasswordAuthenticator("username", "password")),bucket_class=Bucket)
      bucket = cluster.bucket("travel-sample")

      # Disconnect the cluster once you are finished with it. With the
      # acouchbase-backed cluster the operations are asyncio-based rather
      # than blocking.
      cluster.disconnect()
      #end::reactivecluster[]

      #tag::asynccluster[]
      cluster = Cluster.connect("127.0.0.1", ClusterOptions(PasswordAuthenticator("username", "password")))
      bucket = cluster.bucket("travel-sample")

      # Disconnection starts as soon as you call disconnect().
      cluster.disconnect()
      #end::asynccluster[]


      #tag::tls[]
      cluster = Cluster("couchbases://127.0.0.1",ClusterOptions(PasswordAuthenticator("username","password",cert_path="/path/to/cluster.crt")))
      #end::tls[]

      #tag::dnssrv[]
      # The builder below is a Java SDK construct left over from the
      # original docs and has no direct Python equivalent; in the Python
      # SDK, DNS SRV resolution is driven by the connection string itself.
      # env = ClusterEnvironment.builder() \
      #     .ioConfig(IoConfig.enableDnsSrv(True)) \
      #     .build()
      #end::dnssrv[]
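The connectionstringparams snippet above encodes client settings as URL query parameters. A standard-library sketch of how such a string decomposes, independent of the SDK:

from urllib.parse import urlparse, parse_qs

conn_str = 'couchbases://127.0.0.1?compression=inout&log_redaction=on'
parsed = urlparse(conn_str)
print(parsed.scheme)           # 'couchbases' -> the TLS variant of the scheme
print(parsed.hostname)         # '127.0.0.1'
print(parse_qs(parsed.query))  # {'compression': ['inout'], 'log_redaction': ['on']}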
Code example #6
class CouchbaseStorage(BaseStorage):
    """
    Dummy in-memory storage for testing
    """

    _transaction_strategy = 'resolve'
    _supports_unique_constraints = True

    _indexes_fields = ('zoid', 'id', 'part', 'resource', 'of', 'parent_id',
                       'type', 'otid', 'tid')
    _create_statement = 'CREATE INDEX {bucket}_object_{index_name} ON `{bucket}`(`{field}`)'  # noqa
    _counter_doc_id = '__g_txn_counter'

    def __init__(self,
                 read_only=False,
                 dsn=None,
                 username=None,
                 password=None,
                 bucket=None,
                 **kwargs):
        self._dsn = dsn
        self._username = username
        self._password = password
        self._bucket = bucket
        self._cb = None
        super().__init__(read_only)

    @property
    def bucket(self):
        return self._cb

    async def finalize(self):
        pass

    async def initialize(self, loop=None):
        from acouchbase.bucket import Bucket

        self._cb = Bucket(os.path.join(self._dsn, self._bucket),
                          username=self._username,
                          password=self._password)
        await self._cb.connect()

        installed_indexes = []
        primary_installed = False
        async for row in self._cb.n1ql_query(
                N1QLQuery('select * from system:indexes')):
            # keyspace_id (not namespace_id) carries the bucket name
            # in system:indexes.
            if row['indexes']['keyspace_id'] != self._bucket:
                continue
            if row['indexes'].get('is_primary'):
                primary_installed = True
            else:
                installed_indexes.append(
                    row['indexes']['index_key'][0].strip('`'))

        if not installed_indexes:
            logger.info('Initializing bucket, can take some time')

        if not primary_installed:
            logger.warning('Creating primary index')
            async for row in self._cb.n1ql_query(  # noqa
                    'CREATE PRIMARY INDEX ON `{bucket}`'.format(
                        bucket=self._bucket)):
                pass

        for field in self._indexes_fields:
            if field in installed_indexes:
                continue
            statement = self._create_statement.format(bucket=self._bucket,
                                                      index_name=field,
                                                      field=field)
            logger.warning('Creating index {}'.format(statement))
            # `statement` is already fully formatted above.
            async for row in self._cb.n1ql_query(statement):  # noqa
                pass

        for field in get_index_fields():
            if 'json.{}'.format(field) in installed_indexes:
                continue
            statement = self._create_statement.format(
                bucket=self._bucket,
                field='json.' + field,
                index_name='json_' + field)
            logger.warning('Creating index {}'.format(statement))
            # `statement` is already fully formatted above.
            async for row in self._cb.n1ql_query(statement):  # noqa
                pass

    async def remove(self):
        """Reset the tables"""
        pass

    async def open(self):
        return self

    async def close(self, con):
        pass

    async def root(self):
        return await self.load(None, ROOT_ID)

    async def last_transaction(self, txn):
        return self._last_transaction

    async def get_next_tid(self, txn):
        if txn._tid is None:
            result = await self._cb.counter(self._counter_doc_id, 1, 1)
            txn._tid = result.value
        return txn._tid

    async def load(self, txn, oid):
        try:
            result = await self._cb.get(oid)
            value = result.value
            value['state'] = base64.b64decode(value['state'])
            return value
        except couchbase.exceptions.NotFoundError:
            raise KeyError(oid)

    async def start_transaction(self, txn):
        pass

    def get_txn(self, txn):
        if not getattr(txn, '_db_txn', None):
            txn._db_txn = self
        return txn._db_txn

    async def store(self, oid, old_serial, writer, obj, txn):
        p = writer.serialize()  # This calls __getstate__ of obj
        part = writer.part
        if part is None:
            part = 0

        json_data = {}
        # `index` is assumed to be provided by the surrounding package's
        # indexing utilities (not shown in this excerpt).
        future = index.get_future()
        if not obj.__new_marker__ and obj._p_serial is not None:
            # we should be confident this is an object update
            if future is not None and oid in future.update:
                json_data = future.update[oid]
            # only indexing updates
            await self._cb.mutate_in(
                oid, SD.upsert('tid', await self.get_next_tid(txn)),
                SD.upsert('size', len(p)), SD.upsert('part', part),
                SD.upsert('of', writer.of), SD.upsert('otid', old_serial),
                SD.upsert('parent_id', writer.parent_id),
                SD.upsert('id', writer.id), SD.upsert('type', writer.type),
                SD.upsert('state',
                          base64.b64encode(p).decode('ascii')))
        else:
            if future is not None:
                if oid in future.update:
                    json_data = future.update[oid]
                elif oid in future.index:
                    json_data = future.index[oid]
                else:
                    json_data = await writer.get_json()
            else:
                json_data = await writer.get_json()
            await self._cb.upsert(
                oid, {
                    'tid': await self.get_next_tid(txn),
                    'zoid': oid,
                    'size': len(p),
                    'part': part,
                    'resource': writer.resource,
                    'of': writer.of,
                    'otid': old_serial,
                    'parent_id': writer.parent_id,
                    'id': writer.id,
                    'type': writer.type,
                    'json': json_data,
                    'state': base64.b64encode(p).decode('ascii')
                })
        return 0, len(p)

    async def delete(self, txn, oid):
        await self._cb.remove(oid, quiet=True)

    async def commit(self, transaction):
        return await self.get_next_tid(transaction)

    async def abort(self, transaction):
        transaction._db_txn = None

    async def keys(self, txn, oid):
        keys = []
        async for row in self._cb.n1ql_query(
                N1QLQuery(
                    '''
SELECT id from `{}`
WHERE parent_id = $1'''.format(self._bucket), oid)):
            keys.append(row)
        return keys

    async def get_child(self, txn, parent_id, id):
        async for row in self._cb.n1ql_query(
                N1QLQuery(
                    '''
SELECT zoid, tid, size AS state_size, resource, type, state, id
FROM `{}`
WHERE parent_id = $1 AND id = $2
'''.format(self._bucket), parent_id, id)):
            row['state'] = base64.b64decode(row['state'])
            return row

    async def has_key(self, txn, parent_id, id):
        async for row in self._cb.n1ql_query(  # noqa
                N1QLQuery(
                    '''
SELECT zoid
FROM `{}`
WHERE parent_id = $1 AND id = $2
'''.format(self._bucket), parent_id, id)):
            return True
        return False

    async def len(self, txn, oid):
        async for row in self._cb.n1ql_query(
                N1QLQuery(
                    '''
SELECT count(*) FROM `{}` WHERE parent_id = $1
'''.format(self._bucket), oid)):
            return row['$1']
        return 0

    async def items(self, txn, oid):  # pragma: no cover
        async for row in self._cb.n1ql_query(
                N1QLQuery(
                    '''
SELECT zoid, tid, size AS state_size, resource, type, state, id
FROM `{}`
WHERE parent_id = $1
'''.format(self._bucket), oid)):
            row['state'] = base64.b64decode(row['state'])
            yield row

    async def get_children(self, txn, parent, keys):
        items = []
        async for row in self._cb.n1ql_query(
                N1QLQuery(
                    '''
SELECT zoid, tid, size AS state_size, resource, type, state, id
FROM `{}`
WHERE parent_id = $1 AND id IN $2
'''.format(self._bucket), parent, keys)):
            row['state'] = base64.b64decode(row['state'])
            items.append(row)
        return items

    async def get_annotation(self, txn, oid, id):
        async for row in self._cb.n1ql_query(
                N1QLQuery(
                    '''
SELECT zoid, tid, size AS state_size, resource, type, state, id, parent_id
FROM `{}`
WHERE
    of = $1 AND id = $2
'''.format(self._bucket), oid, id)):
            row['state'] = base64.b64decode(row['state'])
            return row

    async def get_annotation_keys(self, txn, oid):
        async for row in self._cb.n1ql_query(
                N1QLQuery(
                    '''
SELECT id, parent_id
FROM `{}`
WHERE of = $1
'''.format(self._bucket), oid)):
            return row

    async def del_blob(self, txn, bid):
        raise NotImplementedError()

    async def write_blob_chunk(self, txn, bid, oid, chunk_index, data):
        raise NotImplementedError()

    async def read_blob_chunk(self, txn, bid, chunk=0):
        raise NotImplementedError()

    async def get_conflicts(self, txn):
        return []

    async def get_page_of_keys(self, txn, oid, page=1, page_size=1000):
        print('get_page_of_keys {} {}'.format(oid, page))
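The listing breaks off inside get_page_of_keys. One way the pagination could be completed, as a sketch using N1QL LIMIT/OFFSET in the same style as the other queries in this class (not necessarily the author's actual implementation):

    async def get_page_of_keys(self, txn, oid, page=1, page_size=1000):
        # Sketch: page through the children of `oid` with LIMIT/OFFSET.
        keys = []
        async for row in self._cb.n1ql_query(
                N1QLQuery(
                    '''
SELECT id FROM `{}`
WHERE parent_id = $1
ORDER BY id
LIMIT {} OFFSET {}
'''.format(self._bucket, page_size, (page - 1) * page_size), oid)):
            keys.append(row['id'])
        return keys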