def test_can_authenticate_with_cert_path_and_username_password_via_PasswordAuthenticator(self):
    cluster = Cluster('couchbases://{host}?certpath={certpath}'.format(
        host=self.cluster_info.host, certpath=CERT_PATH))
    authenticator = PasswordAuthenticator(self.cluster_info.admin_username,
                                          self.cluster_info.admin_password)
    cluster.authenticate(authenticator)
    self._test_allow_cert_path_with_SSL_mock_errors(
        cluster.open_bucket, self.cluster_info.bucket_name)

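# A minimal sketch (hypothetical host and credentials, not part of the test
# suite) of the flow the test above exercises: authenticate a Cluster with a
# PasswordAuthenticator, then open a bucket.
def password_auth_example():
    cluster = Cluster('couchbase://localhost')
    cluster.authenticate(PasswordAuthenticator('Administrator', 'password'))
    return cluster.open_bucket('default')
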
def test_PYCBC_488(self):
    cluster = Cluster(
        'couchbases://10.142.175.101?certpath=/Users/daschl/tmp/ks/chain.pem&keypath=/Users/daschl/tmp/ks/pkey.key'
    )
    with self.assertRaises(MixedAuthException) as maerr:
        cluster.open_bucket("pixels",
                            password=self.cluster_info.bucket_password)
    exception = maerr.exception
    self.assertIsInstance(exception, MixedAuthException)
    self.assertRegex(exception.message, r'.*CertAuthenticator.*password.*')

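# A minimal sketch (not from the test suite) of the cert-auth path this test
# guards: when certpath/keypath appear in the connection string, the bucket
# must be opened *without* a password; mixing one in raises
# MixedAuthException, as asserted above.  The host, file paths, and the
# exact CertAuthenticator constructor arguments here are assumptions.
def cert_auth_example():
    cluster = Cluster(
        'couchbases://example.host'
        '?certpath=/path/to/chain.pem&keypath=/path/to/pkey.key')
    cluster.authenticate(CertAuthenticator())
    # Note: no password argument -- the certificate is the credential.
    return cluster.open_bucket('pixels')
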
def _create_cluster_clean(self, authenticator):
    connargs = self.make_connargs()
    connstr = ConnectionString.parse(str(connargs.pop('connection_string')))
    connstr.clear_option('username')
    bucket = connstr.bucket
    connstr.bucket = None
    password = connargs.get('password', None)
    keys_to_skip = authenticator.get_credentials(bucket)['options'].keys()
    for entry in keys_to_skip:
        connstr.clear_option(entry)
    cluster = Cluster(connstr, bucket_factory=self.factory)
    cluster.authenticate(ClassicAuthenticator(buckets={bucket: password}))
    return cluster, bucket

def test_PYCBC_489(self):
    from couchbase_v2.cluster import Cluster
    with self.assertRaises(MixedAuthException) as maerr:
        cluster = Cluster(
            'couchbases://10.142.175.101?certpath=/Users/daschl/tmp/ks/chain.pem&keypath=/Users/daschl/tmp/ks/pkey.key'
        )
        cb = cluster.open_bucket('pixels', password='******')
        cb.upsert(
            'u:king_arthur', {
                'name': 'Arthur',
                'email': '*****@*****.**',
                'interests': ['Holy Grail', 'African Swallows']
            })
    exception = maerr.exception
    self.assertIsInstance(exception, MixedAuthException)
    self.assertRegex(exception.message,
                     r'.*CertAuthenticator-style.*password.*')

def _create_cluster(self):
    connargs = self.make_connargs()
    connstr = ConnectionString.parse(str(connargs.pop('connection_string')))
    connstr.clear_option('username')
    bucket = connstr.bucket
    connstr.bucket = None
    password = connargs.get('password', '')
    # Can I open a new bucket via open_bucket?
    cluster = Cluster(connstr, bucket_factory=self.factory)
    cluster.authenticate(
        ClassicAuthenticator(
            buckets={bucket: password},
            cluster_password=self.cluster_info.admin_password,
            cluster_username=self.cluster_info.admin_username))
    return cluster, bucket

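# A minimal sketch (hypothetical connection string, names, and credentials)
# of what the two helpers above construct: a Cluster authenticated with a
# ClassicAuthenticator carrying both cluster credentials and per-bucket
# passwords.
def classic_auth_example():
    cluster = Cluster('couchbase://localhost')
    cluster.authenticate(
        ClassicAuthenticator(cluster_username='Administrator',
                             cluster_password='password',
                             buckets={'default': 'bucket-password'}))
    return cluster.open_bucket('default')
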
class Cluster(CoreClient):
    @internal
    def __init__(self,
                 connection_string,      # type: str
                 options=None,           # type: ClusterOptions
                 bucket_factory=Bucket,  # type: Any
                 **kwargs                # type: Any
                 ):
        self._authenticator = kwargs.pop('authenticator', None)
        self.__is_6_5 = None
        # copy options if they exist, as we mutate them
        cluster_opts = deepcopy(options) or ClusterOptions(self._authenticator)
        if not self._authenticator:
            self._authenticator = cluster_opts.pop('authenticator', None)
            if not self._authenticator:
                raise InvalidArgumentException("Authenticator is mandatory")
        async_items = {
            k: kwargs.pop(k)
            for k in list(kwargs.keys()) if k in {'_iops', '_flags'}
        }
        # fix up any overrides to the ClusterOptions here as well
        args, kwargs = cluster_opts.split_args(**kwargs)
        self.connstr = cluster_opts.update_connection_string(
            connection_string, **args)
        self.__admin = None
        self._cluster = CoreCluster(
            self.connstr, bucket_factory=bucket_factory)  # type: CoreCluster
        self._cluster.authenticate(self._authenticator)
        credentials = self._authenticator.get_credentials()
        self._clusteropts = dict(**credentials.get('options', {}))
        # TODO: eliminate the 'mock hack' and ClassicAuthenticator, then you
        # can remove this as well.
        self._clusteropts.update(kwargs)
        self._adminopts = dict(**self._clusteropts)
        self._clusteropts.update(async_items)
        self._connstr_opts = cluster_opts
        self.connstr = cluster_opts.update_connection_string(self.connstr)
        super(Cluster, self).__init__(connection_string=str(self.connstr),
                                      _conntype=_LCB.LCB_TYPE_CLUSTER,
                                      **self._clusteropts)

    @classmethod
    def connect(cls,
                connection_string,  # type: str
                options=None,       # type: ClusterOptions
                **kwargs):
        # type: (...) -> Cluster
        """
        Create a Cluster object.  An Authenticator must be provided, either
        as the ``authenticator`` named parameter or within the options
        argument.

        :param connection_string: the connection string for the cluster.
        :param options: options for the cluster.
        :param Any kwargs: Override corresponding value in options.
        """
        return cls(connection_string, options, **kwargs)

    def _do_ctor_connect(self, *args, **kwargs):
        super(Cluster, self)._do_ctor_connect(*args, **kwargs)

    def _check_for_shutdown(self):
        if not self._cluster:
            raise AlreadyShutdownException(
                "This cluster has already been shut down")

    @property
    @internal
    def _admin(self):
        self._check_for_shutdown()
        if not self.__admin:
            c = ConnectionString.parse(self.connstr)
            if not c.bucket:
                c.bucket = self._adminopts.pop('bucket', None)
            self.__admin = Admin(connection_string=str(c), **self._adminopts)
        return self.__admin

    def bucket(self,
               name  # type: str
               ):
        # type: (...) -> Bucket
        """
        Open a bucket on this cluster.  This doesn't create a bucket, merely
        opens an existing bucket.

        :param name: Name of bucket to open.
        :return: The :class:`~.bucket.Bucket` you requested.
        :raise: :exc:`~.exceptions.BucketDoesNotExistException` if the
            bucket has not been created on this cluster.
        """
        self._check_for_shutdown()
        if not self.__admin:
            self._adminopts['bucket'] = name
        return self._cluster.open_bucket(name, admin=self._admin)

    # Temporary, helpful for working around CCBC-1204.  We should be able to
    # get rid of this logic when that issue is fixed.
    def _is_6_5_plus(self):
        self._check_for_shutdown()
        # Let's just check once.  Below, we only set this if we are sure
        # about the value.
        if self.__is_6_5 is not None:
            return self.__is_6_5
        try:
            response = self._admin.http_request(path="/pools").value
            v = response.get("implementationVersion")
            # Let's just take the first 3 characters -- the string should be
            # X.Y.Z-XXXX-YYYY and we only care about major and minor version.
            self.__is_6_5 = (float(v[:3]) >= 6.5)
        except NetworkException as e:
            # The cloud doesn't let us query this endpoint, so let's assume
            # this is a cloud instance.  However, let's not actually set the
            # __is_6_5 flag, as this could also be a transient error.  That
            # means cloud instances check every time, but this is only
            # temporary.
            return True
        except ValueError:
            # This comes from the conversion to float -- the mock says
            # "CouchbaseMock...".
            self.__is_6_5 = True
        return self.__is_6_5

    def query(self,
              statement,  # type: str
              *options,   # type: Union[QueryOptions,Any]
              **kwargs    # type: Any
              ):
        # type: (...) -> QueryResult
        """
        Perform a N1QL query.

        :param statement: the N1QL query statement to execute
        :param options: A QueryOptions object or the positional parameters
            in the query.
        :param kwargs: Override the corresponding value in the options.  If
            they don't match any value in the options, they are assumed to
            be named parameters for the query.
        :return: The results of the query, or an error message if the query
            failed on the server.
        :raise: :exc:`~.exceptions.QueryException` for errors involving the
            query itself.  Also any exceptions raised by the underlying
            system -- :class:`~.exceptions.TimeoutException`, for instance.
        """
        # We could have multiple positional parameters passed in, one of
        # which may or may not be a QueryOptions.  Note that if multiple
        # QueryOptions are passed in for some strange reason, all but the
        # last are ignored.
        self._check_for_shutdown()
        itercls = kwargs.pop('itercls', QueryResult)
        opt = QueryOptions()
        opts = list(options)
        for o in opts:
            if isinstance(o, QueryOptions):
                opt = o
                opts.remove(o)

        # If this is not a 6.5+ cluster, we need to query against a bucket.
        # We think that once CCBC-1204 is addressed, we can just use the
        # cluster's instance.
        return self._maybe_operate_on_an_open_bucket(
            CoreClient.query,
            QueryException,
            opt.to_n1ql_query(statement, *opts, **kwargs),
            itercls=itercls,
            err_msg="Query requires an open bucket")

    # Gets a random bucket from those the cluster has opened.
    def _get_an_open_bucket(self, err_msg):
        clients = [v() for k, v in self._cluster._buckets.items()]
        clients = [v for v in clients if v]
        if clients:
            return choice(clients)
        raise NoBucketException(err_msg)

    def _maybe_operate_on_an_open_bucket(self, verb, failtype, *args,
                                         **kwargs):
        if self._is_6_5_plus():
            kwargs.pop('err_msg', None)
            return self._operate_on_cluster(verb, failtype, *args, **kwargs)
        return self._operate_on_an_open_bucket(verb, failtype, *args,
                                               **kwargs)

    def _operate_on_an_open_bucket(self, verb, failtype, *args, **kwargs):
        try:
            return verb(
                self._get_an_open_bucket(
                    kwargs.pop('err_msg', 'Cluster has no open buckets')),
                *args, **kwargs)
        except Exception as e:
            raise_from(
                failtype(params=CouchbaseException.ParamType(
                    message='Cluster operation on bucket failed',
                    inner_cause=e)), e)

    def _operate_on_cluster(self,
                            verb,
                            failtype,  # type: Type[CouchbaseException]
                            *args,
                            **kwargs):
        try:
            return verb(self, *args, **kwargs)
        except Exception as e:
            raise_from(
                failtype(params=CouchbaseException.ParamType(
                    message="Cluster operation failed", inner_cause=e)), e)

    # For now this just calls functions.  We can return stuff if we need it,
    # later.
    def _sync_operate_on_entire_cluster(self, verb, *args, **kwargs):
        clients = [v() for k, v in self._cluster._buckets.items()]
        clients = [v for v in clients if v]
        clients.append(self)
        results = []
        for c in clients:
            results.append(verb(c, *args, **kwargs))
        return results

    async def _operate_on_entire_cluster(self, verb, failtype, *args,
                                         **kwargs):
        # If you don't have a cluster client yet, then you don't have any
        # other buckets open either, so this is the same as
        # _operate_on_cluster.
        if not self._cluster._buckets:
            return self._operate_on_cluster(verb, failtype, *args, **kwargs)

        async def coroutine(client, verb, *args, **kwargs):
            return verb(client, *args, **kwargs)

        # OK, let's loop over all the buckets and the cluster client, and
        # let's do it async so it isn't miserably slow.  We create a list of
        # tasks and execute them together...
        tasks = [asyncio.ensure_future(coroutine(self, verb, *args,
                                                 **kwargs))]
        for name, c in self._cluster._buckets.items():
            client = c()
            if client:
                tasks.append(coroutine(client, verb, *args, **kwargs))
        done, pending = await asyncio.wait(tasks)
        results = []
        for d in done:
            results.append(d.result())
        return results

    def analytics_query(self,       # type: Cluster
                        statement,  # type: str
                        *options,   # type: AnalyticsOptions
                        **kwargs):
        # type: (...) -> AnalyticsResult
        """
        Executes an Analytics query against the remote cluster and returns
        an AnalyticsResult with the results of the query.

        :param statement: the analytics statement to execute
        :param options: the optional parameters that the Analytics service
            takes based on the Analytics RFC.
        :return: An AnalyticsResult object with the results of the query, or
            an error message if the query failed on the server.
        :raise: :exc:`~.exceptions.AnalyticsException` for errors associated
            with the analytics query itself.  Also, any exceptions raised by
            the underlying platform --
            :class:`~.exceptions.TimeoutException`, for example.
        """
        # Following the query implementation, but this seems worth
        # revisiting soon.
        self._check_for_shutdown()
        itercls = kwargs.pop('itercls', AnalyticsResult)
        opt = AnalyticsOptions()
        opts = list(options)
        for o in opts:
            if isinstance(o, AnalyticsOptions):
                opt = o
                opts.remove(o)

        return self._maybe_operate_on_an_open_bucket(
            CoreClient.analytics_query,
            AnalyticsException,
            opt.to_analytics_query(statement, *opts, **kwargs),
            itercls=itercls,
            err_msg='Analytics queries require an open bucket')

    def search_query(self,
                     index,     # type: str
                     query,     # type: SearchQuery
                     *options,  # type: SearchOptions
                     **kwargs):
        # type: (...) -> SearchResult
        """
        Executes a Search or FTS query against the remote cluster and
        returns a SearchResult implementation with the results of the query.

        .. code-block:: python

            from couchbase.search import MatchQuery, SearchOptions

            it = cluster.search_query('name', MatchQuery('nosql'),
                                      SearchOptions(limit=10))
            for hit in it:
                print(hit)

        :param str index: Name of the index to use for this query.
        :param query: the fluent search API to construct a query for FTS.
        :param options: the options to pass to the cluster with the query.
        :param kwargs: Overrides corresponding value in options.
        :return: A :class:`~.search.SearchResult` object with the results of
            the query, or an error message if the query failed on the
            server.
        :raise: :exc:`~.exceptions.SearchException` for errors related to
            the query itself.  Also, any exceptions raised by the underlying
            platform -- :class:`~.exceptions.TimeoutException`, for example.
""" self._check_for_shutdown() def do_search(dest): search_params = SearchOptions.gen_search_params_cls( index, query, *options, **kwargs) return search_params.itercls(search_params.body, dest, **search_params.iterargs) return self._maybe_operate_on_an_open_bucket( do_search, SearchException, err_msg="No buckets opened on cluster") _root_diag_data = {'id', 'version', 'sdk'} def diagnostics( self, *options, # type: DiagnosticsOptions **kwargs): # type: (...) -> DiagnosticsResult """ Creates a diagnostics report that can be used to determine the healthfulness of the Cluster. :param options: Options for the diagnostics :return: A :class:`~.diagnostics.DiagnosticsResult` object with the results of the query or error message if the query failed on the server. """ self._check_for_shutdown() result = self._sync_operate_on_entire_cluster( CoreClient.diagnostics, **forward_args(kwargs, *options)) return DiagnosticsResult(result) def ping( self, *options, # type: PingOptions **kwargs): # type: (...) -> PingResult """ Actively contacts each of the services and returns their pinged status. :param options: Options for sending the ping request. :param kwargs: Overrides corresponding value in options. :return: A :class:`~.result.PingResult` representing the state of all the pinged services. :raise: :class:`~.exceptions.CouchbaseException` for various communication issues. """ bucket = self._get_an_open_bucket("Ping requires an open bucket") if bucket: return PingResult(bucket.ping(*options, **kwargs)) raise NoBucketException("ping requires a bucket be opened first") def users(self): # type: (...) -> UserManager """ Get the UserManager. :return: A :class:`~.management.UserManager` with which you can create or update cluster users and roles. """ self._check_for_shutdown() return UserManager(self._admin) def query_indexes(self): # type: (...) -> QueryIndexManager """ Get the QueryIndexManager. :return: A :class:`~.management.QueryIndexManager` with which you can create or modify query indexes on the cluster. """ self._check_for_shutdown() return QueryIndexManager(self._admin) def search_indexes(self): # type: (...) -> SearchIndexManager """ Get the SearchIndexManager. :return: A :class:`~.management.SearchIndexManager` with which you can create or modify search (FTS) indexes on the cluster. """ self._check_for_shutdown() return SearchIndexManager(self._admin) def analytics_indexes(self): # type: (...) -> AnalyticsIndexManager """ Get the AnalyticsIndexManager. :return: A :class:`~.management.AnalyticsIndexManager` with which you can create or modify analytics datasets, dataverses, etc.. on the cluster. """ self._check_for_shutdown() return AnalyticsIndexManager(self) def buckets(self): # type: (...) -> BucketManager """ Get the BucketManager. :return: A :class:`~.management.BucketManager` with which you can create or modify buckets on the cluster. """ self._check_for_shutdown() return BucketManager(self._admin) def disconnect(self): # type: (...) -> None """ Closes and cleans up any resources used by the Cluster and any objects it owns. :return: None :raise: Any exceptions raised by the underlying platform. """ # in this context, if we invoke the _cluster's destructor, that will do same for # all the buckets we've opened, unless they are stored elswhere and are actively # being used. 
        self._cluster = None
        self.__admin = None

    # Only useful for 6.5 DP testing.
    def _is_dev_preview(self):
        self._check_for_shutdown()
        return self._admin.http_request(path="/pools").value.get(
            "isDeveloperPreview", False)

    @property
    def query_timeout(self):
        # type: (...) -> timedelta
        """
        The timeout for N1QL query operations, as a `timedelta`.  This
        affects the :meth:`query` method.

        This can be set in :meth:`connect` by passing in a
        :class:`ClusterOptions` with the query_timeout set to the desired
        time.

        Timeouts may also be adjusted on a per-query basis by setting the
        :attr:`timeout` property in the options passed to the :meth:`query`
        method.  The effective timeout is either the per-query timeout or
        the global timeout, whichever is lower.
        """
        self._check_for_shutdown()
        return timedelta(
            seconds=self._get_timeout_common(_LCB.LCB_CNTL_QUERY_TIMEOUT))

    @property
    def tracing_threshold_query(self):
        # type: (...) -> timedelta
        """
        The tracing threshold for query response times, as a `timedelta`.

        This can be set in :meth:`connect` by passing in a
        :class:`~.ClusterOptions` with the desired tracing_threshold_query
        set in it.
        """
        return timedelta(seconds=self._cntl(op=_LCB.TRACING_THRESHOLD_QUERY,
                                            value_type="timeout"))

    @property
    def tracing_threshold_search(self):
        # type: (...) -> timedelta
        """
        The tracing threshold for search response times, as a `timedelta`.

        This can be set in :meth:`connect` by passing in a
        :class:`~.ClusterOptions` with the desired tracing_threshold_search
        set in it.
        """
        return timedelta(seconds=self._cntl(op=_LCB.TRACING_THRESHOLD_SEARCH,
                                            value_type="timeout"))

    @property
    def tracing_threshold_analytics(self):
        # type: (...) -> timedelta
        """
        The tracing threshold for analytics response times, as a
        `timedelta`.

        This can be set in :meth:`connect` by passing in a
        :class:`~.ClusterOptions` with the desired
        tracing_threshold_analytics set in it.
        """
        return timedelta(seconds=self._cntl(
            op=_LCB.TRACING_THRESHOLD_ANALYTICS, value_type="timeout"))

    @property
    def tracing_orphaned_queue_flush_interval(self):
        # type: (...) -> timedelta
        """
        The interval at which orphaned responses are logged, as a
        `timedelta`.

        This can be set in :meth:`connect` by passing in a
        :class:`~.ClusterOptions` with the desired interval set in it.
        """
        return timedelta(
            seconds=self._cntl(op=_LCB.TRACING_ORPHANED_QUEUE_FLUSH_INTERVAL,
                               value_type="timeout"))

    @property
    def tracing_orphaned_queue_size(self):
        # type: (...) -> int
        """
        The tracing orphaned queue size.

        This can be set in :meth:`connect` by passing in a
        :class:`~.ClusterOptions` with the desired size set in it.
        """
        return self._cntl(op=_LCB.TRACING_ORPHANED_QUEUE_SIZE,
                          value_type="uint32_t")

    @property
    def tracing_threshold_queue_flush_interval(self):
        # type: (...) -> timedelta
        """
        The tracing threshold queue flush interval, as a `timedelta`.

        This can be set in :meth:`connect` by passing in a
        :class:`~.ClusterOptions` with the desired interval set in it.
        """
        return timedelta(
            seconds=self._cntl(op=_LCB.TRACING_THRESHOLD_QUEUE_FLUSH_INTERVAL,
                               value_type="timeout"))

    @property
    def tracing_threshold_queue_size(self):
        # type: (...) -> int
        """
        The tracing threshold queue size.

        This can be set in :meth:`connect` by passing in a
        :class:`~.ClusterOptions` with the desired size set in it.
        """
        return self._cntl(op=_LCB.TRACING_THRESHOLD_QUEUE_SIZE,
                          value_type="uint32_t")

    @property
    def redaction(self):
        # type: (...) -> bool
        """
        Whether or not the logs will redact sensitive information.
""" return bool(self._cntl(_LCB.LCB_CNTL_LOG_REDACTION, value_type='int')) @property def compression(self): # type: (...) -> Compression """ Returns the compression mode to be used when talking to the server. See :class:`Compression` for details. This can be set in the :meth:`connect` by passing in a :class:`~.ClusterOptions` with the desired compression set in it. """ return Compression.from_int( self._cntl(_LCB.LCB_CNTL_COMPRESSION_OPTS, value_type='int')) @property def compression_min_size(self): # type: (...) -> int """ Minimum size (in bytes) of the document payload to be compressed when compression enabled. This can be set in the :meth:`connect` by passing in a :class:`~.ClusterOptions` with the desired compression set in it. """ return self._cntl(_LCB.LCB_CNTL_COMPRESSION_MIN_SIZE, value_type='uint32_t') @property def compression_min_ratio(self): # type: (...) -> float """ Minimum compression ratio (compressed / original) of the compressed payload to allow sending it to cluster. This can be set in the :meth:`connect` by passing in a :class:`~.ClusterOptions` with the desired ratio set in it. """ return self._cntl(_LCB.LCB_CNTL_COMPRESSION_MIN_RATIO, value_type='float') @property def is_ssl(self): # type: (...) -> bool """ Read-only boolean property indicating whether SSL is used for this connection. If this property is true, then all communication between this object and the Couchbase cluster is encrypted using SSL. See :meth:`__init__` for more information on connection options. """ mode = self._cntl(op=_LCB.LCB_CNTL_SSL_MODE, value_type='int') return mode & _LCB.LCB_SSL_ENABLED != 0