Example #1
class Db:
    # CLUSTER
    cluster = None

    # ITEMS
    bucket_items = None

    # VAR
    bucket_var = None

    # INIT
    def __init__(self):
        # CONNECT DB
        self.cluster = Cluster(
            'couchbase://couchbase',
            ClusterOptions(
                PasswordAuthenticator(os.environ.get('DB_USER'),
                                      os.environ.get('DB_PASSWORD'))))

        # BUCKET
        self.bucket_items = self.cluster.bucket('items').default_collection()
        self.bucket_var = self.cluster.bucket('var').default_collection()

    # GET BUCKET ITEM
    def get_bucket_items(self):
        return self.bucket_items

    # GET BUCKET VAR
    def get_bucket_var(self):
        return self.bucket_var
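
A minimal usage sketch for the Db wrapper above, assuming the DB_USER and DB_PASSWORD environment variables are set and that the 'items' and 'var' buckets exist; the document keys and values are placeholders.

# Hypothetical usage of the Db class defined in Example #1.
db = Db()

items = db.get_bucket_items()
items.upsert('item::1', {'name': 'example', 'corrupted': False})  # create or update a document
print(items.get('item::1').content_as[dict])                      # read it back as a dict

var = db.get_bucket_var()
var.upsert('last_run', {'timestamp': '2021-01-01T00:00:00Z'})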
Example #2
    def uploadDoc(self):
        # connect to cb cluster
        try:
            connection = "couchbase://" + self.server
            if "ip6" in self.server or self.server.startswith("["):
                connection = connection + "?ipv6=allow"
            cluster = Cluster(
                connection,
                ClusterOptions(
                    PasswordAuthenticator(self.username, self.password)))
            # authenticator = PasswordAuthenticator(self.username, self.password)
            # cluster.authenticate(authenticator)
            cb = cluster.bucket(self.bucket)
            cb_coll = cb.default_collection()
            cb.timeout = 100
        except Exception as e:
            logging.error("Connection error\n" + traceback.format_exc())
        json_docs = {}
        for i in range(self.startseqnum, self.startseqnum + self.num_docs):
            self.createContent()
            dockey = self.keyprefix + str(i)
            json_docs[dockey] = self.json_objs_dict

        BYTES_PER_BATCH = 1024 * 256  # 256K
        batches = []
        cur_batch = {}
        cur_size = 0
        batches.append(cur_batch)

        for key, value in list(json_docs.items()):
            cur_batch[key] = value
            cur_size += len(key) + len(value) + 24
            if cur_size > BYTES_PER_BATCH:
                cur_batch = {}
                batches.append(cur_batch)
                cur_size = 0

        num_completed = 0
        while batches:
            batch = batches[-1]
            try:
                cb_coll.upsert_multi(batch)
                num_completed += len(batch)
                batches.pop()
            except CouchbaseTransientError as e:
                logging.error(e)
                ok, fail = e.split_results()
                new_batch = {}
                for key in fail:
                    new_batch[key] = json_docs[key]
                batches.pop()
                batches.append(new_batch)
                num_completed += len(ok)
                logging.info("Retrying {}/{} items".format(
                    len(new_batch), len(ok)))
            logging.info("Completed {}/{} items".format(
                num_completed, len(json_docs)))
        if self.xattrs:
            logging.info("Upserting xattrs")
            self.add_xattrs(cb)
Example #3
class CollectionsWrapper:

    TIMEOUT = 120

    def __init__(self, host, bucket, username, password, quiet=True, port=8091):
        connection_string = 'couchbase://{}?password={}'.format(host, password)
        pass_auth = PasswordAuthenticator(username, password)
        timeout = ClusterTimeoutOptions(kv_timeout=timedelta(seconds=self.TIMEOUT))
        options = ClusterOptions(authenticator=pass_auth, timeout_options=timeout)
        self.cluster = Cluster(connection_string=connection_string, options=options)
        self.bucket = self.cluster.bucket(bucket)
        self.client = self.bucket.default_collection()
        self.use_count = 0
        self.use_time = 0
        self.last_use_time = 0

    def start_using(self):
        self.last_use_time = time()

    def stop_using(self):
        self.use_time += time() - self.last_use_time
        self.use_count += 1

    def query(self, ddoc, view, key):
        return self.bucket.view_query(ddoc, view, key=key)

    def set(self, key, doc):
        self.client.insert(key, doc)

    def delete(self, key):
        self.client.remove(key)
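
A short sketch of how the CollectionsWrapper timing bookkeeping might be exercised, assuming a reachable cluster; the host, bucket name, credentials, and document key are placeholders.

# Hypothetical usage of CollectionsWrapper from Example #3.
client = CollectionsWrapper('localhost', 'bucket-1', 'Administrator', 'password')

client.start_using()
client.set('doc::1', {'value': 42})   # insert() fails if the key already exists
client.delete('doc::1')               # remove the document again
client.stop_using()

# use_time accumulates wall-clock seconds spent between start/stop calls
print(client.use_count, client.use_time)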
Example #4
 def test_adb_cb4_connection_no_cert(self):
     # noinspection PyBroadException
     try:
         _credentials_file = os.environ['HOME'] + "/adb-cb4-credentials"
         # specify the cluster and specify an authenticator containing a
         # username and password to be passed to the cluster.
         if not Path(_credentials_file).is_file():
             sys.exit("*** credentials_file file " + _credentials_file +
                      " can not be found!")
         self.get_credentials(_credentials_file)
         cluster = Cluster(
             'couchbase://' + self.cb_host,
             ClusterOptions(
                 PasswordAuthenticator(self.cb_user, self.cb_password)))
         # following a successful authentication, a bucket can be opened.
         # access a bucket in that cluster
         bucket = cluster.bucket('mdata')
         collection = bucket.default_collection()
         ingest_document_result = collection.get(
             "MD:V01:METAR:stations_ingest")
         print("test_adb_cb4_connection_no_cert: successfully read ",
               ingest_document_result.content)
     except:
         self.fail(
             "test_adb_cb4_connection_no_cert: TestConnection.test_connection Exception failure: "
             + str(sys.exc_info()))
Example #5
def get_bucket(conf):
    cluster = Cluster("{host}:{port}".format(host=conf['host'],
                                             port=conf['port']),
                      options=ClusterOptions(
                          PasswordAuthenticator(username=conf['user'],
                                                password=conf['password'])))
    return cluster.bucket(str(conf['bucket']))
Example #6
class CouchbaseModel:
    CB_BUCKET_NAME = 'django'

    def __init__(self):
        self.cluster = None
        self.collection = None

        self.initialize_cluster()
        self.initialize_collection()

    def initialize_cluster(self):
        password_authenticator = PasswordAuthenticator('Administrator',
                                                       'Administrator')
        cluster_options = ClusterOptions(password_authenticator)
        self.cluster = Cluster('couchbase://localhost', cluster_options)

    def initialize_collection(self):
        bucket = self.cluster.bucket(self.CB_BUCKET_NAME)

        try:
            self.cluster.query_indexes().create_primary_index(
                self.CB_BUCKET_NAME)
        except:
            pass

        self.collection = bucket.default_collection()
Example #7
    def test_async(self):
      print("blockingtoasync")
      #tag::blockingtoasync[]
      cluster = Cluster.connect("couchbase://localhost", ClusterOptions(PasswordAuthenticator("username", "password")))
      bucket = cluster.bucket("travel-sample")

      # Same API as Bucket, but completely async with asyncio Futures
      from acouchbase.bucket import Bucket
      async_bucket=Bucket("couchbase://localhost/default")

      cluster.disconnect()
      #end::blockingtoasync[]

      print("reactivecluster")
      #tag::reactivecluster[]
      from acouchbase.bucket import Bucket
      cluster = Cluster("couchbase://localhost", ClusterOptions(PasswordAuthenticator("username", "password")),bucket_class=Bucket)
      bucket = cluster.bucket("travel-sample")

      # A reactive cluster's disconnect method returns a Mono<Void>.
      # Nothing actually happens until you subscribe to the Mono.
      # The simplest way to subscribe is to await completion by calling `block()`.
      cluster.disconnect()
      #end::reactivecluster[]

      print("asynccluster")
      #tag::asynccluster[]
      cluster = Cluster.connect("couchbase://localhost", ClusterOptions(PasswordAuthenticator("username", "password")))
      bucket = cluster.bucket("travel-sample")

      # An async cluster's disconnect method returns a CompletableFuture<Void>.
      # The disconnection starts as soon as you call disconnect().
      # The simplest way to wait for the disconnect to complete is to call `join()`.
      cluster.disconnect().join()
      #end::asynccluster[]

      print("tls")
      #tag::tls[]
      cluster = Cluster("couchbases://localhost",ClusterOptions(PasswordAuthenticator("username","password",cert_path="/path/to/cluster.crt")))
      #end::tls[]

      print("dnssrv")
      #tag::dnssrv[]
      env = ClusterEnvironment.builder() \
          .ioConfig(IoConfig.enableDnsSrv(true)) \
          .build()
Example #8
def connect_couchbase(bucket_name):
    # get a reference to our cluster
    cluster = Cluster(host_url, ClusterOptions(PasswordAuthenticator(user, password)))

    # get a reference to our bucket
    cb = cluster.bucket(bucket_name)
    # get a reference to the default collection
    collection = cb.default_collection()
    return collection
Example #9
def get_cb_bucket(cdbname):
    pa = PasswordAuthenticator(cb_su, cb_pass)
    try:
        cluster = Cluster('couchbase://127.0.0.1', ClusterOptions(pa))
        db = cluster.bucket(cdbname)
        return db
    except couchbase.exceptions.CouchbaseError as err:
        print(err)
        return "an error occurred"
Example #10
class CBDoc:
    def __init__(self):
        config = os.environ
        self.cb_host = config.get("health_cb_host", "172.23.104.180")
        self.cb_bucket = config.get("health_cb_bucket", "QE-staticserver-pool-health")
        self.cb_username = config.get("health_cb_username", "Administrator")
        self.cb_userpassword = config.get("health_cb_password")
        if not self.cb_userpassword:
            print("Environment variable health_cb_password must be set!")
            return
        try:
            self.cb_cluster = Cluster("couchbase://"+self.cb_host, ClusterOptions(PasswordAuthenticator(self.cb_username, self.cb_userpassword), \
                                    timeout_options=ClusterTimeoutOptions(kv_timeout=timedelta(seconds=10))))
            self.cb_b = self.cb_cluster.bucket(self.cb_bucket)
            self.cb = self.cb_b.default_collection()
            
        except Exception as e:
            print('Connection Failed: %s ' % self.cb_host)
            print(e)

    def get_doc(self, doc_key, retries=3):
        while retries > 0:
            try:
                return self.cb.get(doc_key)
            except Exception as e:
                print('Error while getting doc %s !' % doc_key)
                print(e)
            time.sleep(5)
            retries -= 1

    def save_doc(self, doc_key, doc_value, retries=3):
        while retries > 0:
            try:
                self.cb.upsert(doc_key, doc_value)
                print("%s added/updated successfully" % doc_key)
                break
            except Exception as e:
                print('Document with key: %s saving error' % doc_key)
                print(e)
            time.sleep(5)
            retries -= 1

    def remove_doc(self, ip, retries=3):
        while retries > 0:
            try:
                static_doc_value = self.static_cb.get(ip).value
                self.static_cb.remove(ip)
                log.info("{} removed from static pools: {}".format(ip, ",".join(static_doc_value["poolId"])))
                break
            except NotFoundError:
                break
            except Exception as e:
                print("Error removing {} from static pools".format(ip))
                print(e)
            time.sleep(5)
            retries -= 1
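
A possible way to drive the CBDoc helper above, assuming the health_cb_* environment variables are set; the document key and value are placeholders.

# Hypothetical usage of CBDoc from Example #10.
cb_doc = CBDoc()
cb_doc.save_doc("172.23.100.1", {"state": "available", "poolId": ["regression"]})
result = cb_doc.get_doc("172.23.100.1")
if result:
    print(result.content_as[dict])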
Example #11
 def init_connection(self, args):
     global couchbase_connection
     connection_string = 'couchbase://%s' % (args['server'])
     couchbase_cluster = Cluster(
         connection_string,
         ClusterOptions(
             PasswordAuthenticator(args['username'], args['password'])))
     couchbase_bucket = couchbase_cluster.bucket(args['bucket'])
     self.bucket_name = args['bucket']
     self.couchbase_connection = couchbase_bucket.default_collection()
Example #12
 def _createConn(self):
     try:
         cluster = Cluster(self.connection_string, ClusterOptions(PasswordAuthenticator(self.bucket, 'password')))
         #cluster.authenticate(PasswordAuthenticator(self.bucket, 'password'))
         self.cb = cluster.bucket(self.bucket)
         self.default_collection = self.cb.default_collection()
     except BucketNotFoundError:
          raise
     except AuthError:
         # Try using default user created by the tests, if any, in case there is no user with bucket name in the
         # cluster.
         try:
             cluster = Cluster(self.connection_string,
                               ClusterOptions(PasswordAuthenticator("cbadminbucket", 'password')),
                               bucket_class=CouchbaseBucket)
             self.cb = cluster.bucket(self.bucket)
             self.default_collection = self.cb.default_collection()
         except AuthError:
             raise
Example #13
def file_loc_processing():
    global file_counter
    # get a reference to our cluster
    cluster = Cluster(
        'couchbase://' + options.host,
        ClusterOptions(PasswordAuthenticator(options.user, options.password)))

    file_loc = options.agent_env['HVR_FILE_LOC']
    tbl_map = table_file_name_map()

    client = cluster.bucket(options.database)
    db = cb_coll = client.default_collection()

    for t in tbl_map:
        base_name = table_name_normalize(t[0])
        hvr_schema = base_name[0]
        hvr_base_table = base_name[1]
        collection = options.collection.format(hvr_schema=hvr_schema,
                                               hvr_base_name=hvr_base_table,
                                               hvr_tbl_name=t[1])
        keys = t[3].split(",")
        # if the collection is absent, no error is raised
        #       if options.recreate :
        #           trace("Dropping collection '{0}'".format(collection))
        #           db.drop_collection(collection)

        for name in tbl_map[t]:
            full_name = file_loc + '/' + name
            trace("Reading and parsing file '" + full_name + "' ... ")
            try:
                with open(full_name) as json_file:
                    for line in json_file:
                        json_obj = json.loads(line)
                        try:
                            #if options.mode == SetupMode.MODE_SOFTDELETE :
                            #added new pseudo column
                            column_id = ''
                            for column in keys:
                                column_id = column_id + "::" + str(
                                    json_obj[column])
                            #trace("key will equal:" + str(column_id.replace('::', "", 1)) + " /n")
                            mykey = str(hvr_base_table) + str(column_id)
                            trace("key will equal: " + mykey + "\n")
                            result = db.upsert(mykey, str(json_obj))
                            #trace(str(result.cas) + " mykey:" + str(mykey) + " data:" + str(json_obj))
                        except Exception as e:
                            trace(e)
                # remove successfully transmitted file
                os.remove(full_name)
                file_counter = file_counter + 1
            except IOError as err:
                raise Exception("Couldn't open file " + full_name)
Example #14
    def apply(self, backup_base, backup):
        uid = backup.get_collection_uid(self.collection_string)
        cluster = Cluster("couchbase://" + backup_base.master.ip,
                          authenticator=PasswordAuthenticator(
                              "Administrator", "password"))
        bucket = cluster.bucket(self.collection_string.bucket_name)
        scope = bucket.scope(self.collection_string.scope_name)
        collection = scope.collection(self.collection_string.collection_name)

        set_result = collection.mutate_in(self.key, [
            SD.upsert(self.path, self.value, xattr=True, create_parents=True)
        ])
        backup.set_subdoc(self.collection_string, self.key, self.path,
                          self.value, Tag.XATTR_CHANGED)
Example #15
class CouchDbAccess:
    def __init__(self):
        # using a local server
        self.cluster = Cluster(
            'couchbase://localhost:8091',
            ClusterOptions(
                PasswordAuthenticator('Administrator', 'password1234')))

    def upsert_doc(self, doc, doc_id, bucket_name):
        print("Upsert document: ")
        cb = self.cluster.bucket(bucket_name)
        coll = cb.default_collection()
        try:
            result = coll.upsert(doc_id, doc)
            print(result.cas)
        except Exception as e:
            print(e)
Example #16
    def _get_connection(self):
        """Connect to the Couchbase server."""
        if self._connection is None:
            if self.host and self.port:
                uri = f"couchbase://{self.host}:{self.port}"
            else:
                uri = f"couchbase://{self.host}"
            if self.username and self.password:
                auth = PasswordAuthenticator(self.username, self.password)
                cluster = Cluster(uri, ClusterOptions(auth))
            else:
                cluster = Cluster(uri)

            bucket = cluster.bucket(self.bucket)

            self._connection = bucket.default_collection()
        return self._connection
Example #17
class ClusterTestCase(ConnectionTestCase):
    def setUp(self, **kwargs):
        self.factory = Bucket
        super(ClusterTestCase, self).setUp()
        connargs = self.cluster_info.make_connargs()
        connstr_abstract = ConnectionString.parse(
            connargs.pop('connection_string'))
        bucket_name = connstr_abstract.bucket
        connstr_abstract.bucket = None
        connstr_abstract.set_option('enable_collections', 'true')
        self.cluster = Cluster(
            connstr_abstract,
            ClusterOptions(
                ClassicAuthenticator(self.cluster_info.admin_username,
                                     self.cluster_info.admin_password)))
        self.admin = self.make_admin_connection()
        self.bucket = self.cluster.bucket(bucket_name, **connargs)
        self.bucket_name = bucket_name
Example #18
 def connection(self, client_ip, bucket_name, user, password):
     log.info(
         "Bucket name for connection is ---- {0}, username -- {1}, ----- password -- {2}".format(bucket_name, user, \
                                                                                                 password))
     result = False
     connection_string = 'couchbase://' + client_ip + '/' + bucket_name + '?username='******'&select_bucket=true'
     log.info(
         " Value of connection string is - {0}".format(connection_string))
     time.sleep(2)
     try:
         cluster = Cluster(
             'couchbase://' + client_ip,
             ClusterOptions(PasswordAuthenticator(user, password)))
         cb = cluster.bucket(bucket_name)
         default_collection = cb.default_collection()
         if cb is not None:
             result = True
             return default_collection, result
     except Exception as ex:
         log.info("Exception in creating an SDK connection {0}".format(ex))
         return result
Example #19
def connect_to_cluster(host: str,
                       user: str,
                       password: str,
                       bucket: str,
                       services: List[ServiceType] = [ServiceType.Query]):
    """Creates a connection to a cluster and checks its connected to the given services before returning."""
    cluster = Cluster(host,
                      ClusterOptions(PasswordAuthenticator(user, password)))
    cb = cluster.bucket(bucket)  # pylint: disable=unused-variable
    for _ in range(100):
        result = cb.ping(PingOptions(service_types=services))
        ready = True
        for service in services:
            try:
                if result.endpoints[service][0].state != PingState.OK:
                    ready = False
            except (KeyError, IndexError) as e:
                raise AssertionError(
                    f"Service {service.value} not available") from e
        if ready:
            return cluster, cb
        time.sleep(1)
    raise AssertionError("Failed to connect to cluster")
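
A possible call site for connect_to_cluster, assuming a local single-node cluster and the travel-sample bucket; the connection string, credentials, and bucket are placeholders. It returns only after the requested services report PingState.OK.

# Hypothetical usage of connect_to_cluster from Example #19.
cluster, cb = connect_to_cluster(
    "couchbase://localhost",
    "Administrator",
    "password",
    "travel-sample",
    services=[ServiceType.KeyValue, ServiceType.Query])

result = cluster.query("SELECT RAW COUNT(*) FROM `travel-sample`")
print(list(result))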
Example #20
from couchbase.cluster import Cluster, ClusterOptions, Bucket, PasswordAuthenticator
from couchbase_core.views.iterator import View
from config import dbuser, dbpass, dbbucket as bucket_name, dbport
import os
import fnmatch

bucketname = 'bgch-cb-api'
cluster = Cluster('couchbase://localhost',
                  ClusterOptions(PasswordAuthenticator(dbuser, dbpass)))

bucket = cluster.bucket(bucketname)
coll = bucket.default_collection()
print(bucketname)
print(bucket)
print(coll)

dockey = "alert!00007e32-ad47-4bd4-ba64-f8dbcb10033d"
dockey = "00007e32-ad47-4bd4-ba64-f8dbcb10033d"
# dockey = "_design/alert"

result = coll.get(dockey)
content = result.content_as[str]
print(content)

viewId = "whitelist"
connection_timeout = 60000
limit = 6
inclusive_end = True
skip = 0
stale = False
group = True
Example #21
import os
import pprint
import sys
import couchbase
import requests
from datetime import datetime
from couchbase.cluster import Cluster, ClusterOptions
from couchbase_core.cluster import PasswordAuthenticator

# CONNECT DB
cluster = Cluster(
    'couchbase://couchbase',
    ClusterOptions(
        PasswordAuthenticator(os.environ.get('DB_USER'),
                              os.environ.get('DB_PASSWORD'))))

# BUCKET ITEMS
cb = cluster.bucket('items').default_collection()

# CREATE THE INDEXES (N1QL DDL runs through cluster.query in SDK 3)
cluster.query('CREATE PRIMARY INDEX `items-primary-index` ON `items` USING GSI;'
              ).execute()
cluster.query('CREATE INDEX `items-corrupted-index` ON items(corrupted) USING GSI;'
              ).execute()
cluster.query(
    'CREATE INDEX `items-learning-index` ON items(learningData) USING GSI;'
).execute()
Example #22
import argparse

ap = argparse.ArgumentParser()

ap.add_argument("--cb_server", default="172.23.121.84")
ap.add_argument("--cb_username", default="Administrator")
ap.add_argument("--cb_password", default="password")
ap.add_argument("versions")

args = vars(ap.parse_args())

cluster = Cluster(
    "couchbase://" + args["cb_server"],
    ClusterOptions(
        PasswordAuthenticator(args["cb_username"], args["cb_password"])))

server_bucket = cluster.bucket("server")
greenboard_bucket = cluster.bucket("greenboard")
greenboard_collection = greenboard_bucket.default_collection()

supplied_versions = args["versions"].split(",")
versions = set()

for v in supplied_versions:
    for version in list(
            server_bucket.query(
                "select raw `build` from server where `build` like '%{}%' group by `build`"
                .format(v))):
        versions.add(version)

for version in versions:
    logger.info("fixing {}".format(version))
Example #23
# needed for any cluster connection
from couchbase.cluster import Cluster, ClusterOptions
from couchbase_core.cluster import PasswordAuthenticator

# needed to support SQL++ (N1QL) query
from couchbase.cluster import QueryOptions

# get a reference to our cluster
cluster = Cluster(
    'couchbase://localhost',
    ClusterOptions(PasswordAuthenticator('Administrator', 'password')))
# end::connect[]

# tag::bucket[]
# get a reference to our bucket
cb = cluster.bucket('travel-sample')
# end::bucket[]

# tag::default-collection[]
# get a reference to the default collection
cb_coll = cb.default_collection()

# end::default-collection[]


# tag::upsert-func[]
def upsert_document(doc):
    print("\nUpsert CAS: ")
    try:
        # key will equal: "airline_8091"
        key = doc["type"] + "_" + str(doc["id"])
        result = cb_coll.upsert(key, doc)
        print(result.cas)
    except Exception as e:
        print(e)
Example #24
    parser.add_argument(
        "--build-url",
        action="store",
        type=str,
        help="The URL of the Jenkins job to record in the results")
    parser.add_argument("--build",
                        action="store",
                        type=str,
                        help="The build version to record in the results")
    args = parser.parse_args()

    if args.password is None:
        args.password = getpass("Enter server password: ")
    print("Connecting to couchbase://{}:8091".format(args.server))
    cluster = Cluster(
        "couchbase://{}:8091".format(args.server),
        ClusterOptions(PasswordAuthenticator(args.username, args.password)))

    all_metrics = _get_metrics()
    seen_metrics = set()
    updated_benchmarks = _update_benchmarks(args.directory)
    metrics_bucket = cluster.bucket("metrics")
    benchmarks_bucket = cluster.bucket("benchmarks")
    for mark in updated_benchmarks:
        benchmarks_bucket.upsert(str(uuid.uuid4()), mark)
        if mark["metric"] not in seen_metrics:
            seen_metrics.add(mark["metric"])
            metric = next(
                (m for m in all_metrics if m["id"] == mark["metric"]), None)
            if metric is not None:
                metrics_bucket.upsert(mark["metric"], metric)
Example #25
# needed for any cluster connection
from couchbase.cluster import Cluster, ClusterOptions
from couchbase_core.cluster import PasswordAuthenticator

# needed to support SQL++ (N1QL) query
from couchbase.cluster import QueryOptions

# get a reference to our cluster
cluster = Cluster('couchbase://localhost',
                  ClusterOptions(
                      PasswordAuthenticator('Administrator', db_password)),
                  lockmode=2)  # added lockmode to avoid locking error

# get a reference to our bucket
cb = cluster.bucket('events')

# get a reference to the default collection
cb_coll = cb.default_collection()

app = Flask(__name__)


@app.route("/webhooks/answer")
def answer():
    params = request.args
    pprint(params)
    return jsonify(ncco)


@app.route("/webhooks/event", methods=['POST'])
Example #26
from couchbase.cluster import Cluster
from couchbase.auth import PasswordAuthenticator
from couchbase.management.views import (View, DesignDocument,
                                        DesignDocumentNamespace,
                                        DesignDocumentNotFoundException)

# tag::create_view_mgr[]
cluster = Cluster("couchbase://localhost",
                  authenticator=PasswordAuthenticator("Administrator",
                                                      "password"))

# For Server versions 6.5 or later you do not need to open a bucket here
bucket = cluster.bucket("travel-sample")

view_manager = bucket.view_indexes()
# end::create_view_mgr[]

# tag::create_view[]
design_doc = DesignDocument(
    name="landmarks",
    views={
        "by_country":
        View(
            map=
            "function (doc, meta) { if (doc.type == 'landmark') { emit([doc.country, doc.city], null); } }"
        ),
        "by_activity":
        View(
            map=
            "function (doc, meta) { if (doc.type == 'landmark') { emit(doc.activity, null); } }",
            reduce="_count")
Example #27
from couchbase import enable_logging

# NOTE: for simple test to see output, drop the threshold
#         ex:  tracing_threshold_kv=timedelta(microseconds=1)

# tag::threshold_logging_config[]
# configure logging
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
# setup couchbase logging
enable_logging()

tracing_opts = ClusterTracingOptions(
    tracing_threshold_queue_size=10,
    tracing_threshold_kv=timedelta(milliseconds=500))

cluster_opts = ClusterOptions(
    authenticator=PasswordAuthenticator("Administrator", "password"),
    tracing_options=tracing_opts)

cluster = Cluster(
    "couchbase://localhost",
    options=cluster_opts
)
# end::threshold_logging_config[]

collection = cluster.bucket("beer-sample").default_collection()

for _ in range(100):
    collection.get("21st_amendment_brewery_cafe")
Example #28
from couchbase.cluster import Cluster
from couchbase.auth import PasswordAuthenticator
from couchbase.management.queries import CreatePrimaryQueryIndexOptions
from couchbase.management.users import User, Role

bucket_name = "travel-sample"
username = "******"
pw = "test-passw0rd!"

adm_cluster = Cluster("couchbase://localhost",
                      authenticator=PasswordAuthenticator(
                          "Administrator", "password"))
# For Server versions 6.5 or later you do not need to open a bucket here
adm_bucket = adm_cluster.bucket(bucket_name)

# tag::create_user[]
user_manager = adm_cluster.users()
user = User(
    username=username,
    display_name="Test User",
    roles=[
        # Roles required for reading data from bucket
        Role(name="data_reader", bucket="*"),
        Role(name="query_select", bucket="*"),
        # Roles required for writing data to bucket
        Role(name="data_writer", bucket=bucket_name),
        Role(name="query_insert", bucket=bucket_name),
        Role(name="query_delete", bucket=bucket_name),
        # Role required for idx creation on bucket
        Role(name="query_manage_index", bucket=bucket_name),
    ],
Example #29
from uuid import uuid4
from datetime import datetime
import os
import traceback
from configparser import ConfigParser
from requests.auth import HTTPBasicAuth

CB_BUCKET = os.environ.get("CB_BUCKET") or "system_test_dashboard"
CB_USERNAME = os.environ.get("CB_USERNAME") or "Administrator"
CB_PASSWORD = os.environ.get("CB_PASSWORD") or "password"

cluster = Cluster(
    "couchbase://{}".format(os.environ["CB_SERVER"]),
    ClusterOptions(PasswordAuthenticator(CB_USERNAME, CB_PASSWORD),
                   lockmode=LockMode.WAIT))
bucket = cluster.bucket(CB_BUCKET)
collection = bucket.default_collection()

requests_cache.install_cache(expire_after=60, backend="memory")

app = Flask("systen_testing_dashbord")

app.secret_key = b'\xe0\xac#\x06\xe3\xc5\x19\xd6\xfd\xaf+e\xb9\xd0\xb0\x1f'

LAUNCHER_TO_PARSER_CACHE = {}
JENKINS_PREFIX = "http://qa.sc.couchbase.com/job/"


def fetch_launchers():
    global LAUNCHERS
    try:
Example #30
from datetime import timedelta
from couchbase.cluster import Cluster
from couchbase.collection import GetOptions
class Migrating(object):
    pass

#tag::timeoutbuilder[]
# SDK 3 equivalent
cluster=Cluster("couchbases://10.192.1.104")
collection=cluster.bucket("default").default_collection()
collection.timeout=5
#end::timeoutbuilder[]

# not applicable
#natag::shutdown[]
# ClusterEnvironment env = ClusterEnvironment.create();
# Cluster cluster = Cluster.connect(
#     "127.0.0.1",
#                   // pass the custom environment through the cluster options
# clusterOptions("user", "pass").environment(env)
# );
#
# // first disconnect, then shutdown the environment
# cluster.disconnect();
# env.shutdown();
#naend::shutdown[]


#natag::sysprops[]
# not applicable
# Will set the max http connections to 23