Example #1
0
class CouchbaseBackend(BaseBackend):
    """Accessor for the cache-refresh configuration stored in Couchbase."""

    def __init__(self, host, user, password):
        # All queries are issued through this single client.
        self.backend = CouchbaseClient(host, user, password)

    def get_configuration(self):
        """Fetch cache-refresh settings from the ``configuration`` document.

        :returns: The document attributes plus an ``id`` key, or an empty
            dict when the request fails or the document is missing/empty.
        """
        req = self.backend.exec_query(
            "SELECT oxTrustCacheRefreshServerIpAddress, gluuVdsCacheRefreshEnabled "
            "FROM `gluu` "
            "USE KEYS 'configuration'")

        if not req.ok:
            return {}

        # Guard against an empty result set; indexing `[0]` blindly would
        # raise IndexError instead of falling back to an empty config.
        results = req.json()["results"]
        if not results:
            return {}

        config = results[0]
        if not config:
            return {}

        config.update({"id": "configuration"})
        return config

    def update_configuration(self, id_, ip):
        """Set ``oxTrustCacheRefreshServerIpAddress`` on document ``id_``.

        :param id_: Document key to update.
        :param ip: IP address value to store.
        :returns: Dict with ``success`` (bool) and ``message`` (response body).
        """
        # NOTE(review): the key and IP are interpolated directly into the
        # N1QL statement; callers must not pass untrusted values here.
        req = self.backend.exec_query(
            "UPDATE `gluu` "
            "USE KEYS '{0}' "
            "SET oxTrustCacheRefreshServerIpAddress='{1}' "
            "RETURNING oxTrustCacheRefreshServerIpAddress".format(id_, ip))

        result = {
            "success": req.ok,
            "message": req.text,
        }
        return result
class CouchbaseBackend(BaseBackend):
    """Accessor for the oxAuth configuration stored in Couchbase."""

    def __init__(self, host, user, password):
        # All queries are issued through this single client.
        self.backend = CouchbaseClient(host, user, password)

    def get_oxauth_config(self):
        """Fetch oxAuth config from the ``configuration_oxauth`` document.

        :returns: The document attributes plus an ``id`` key, or an empty
            dict when the request fails or the document is missing/empty.
        """
        req = self.backend.exec_query(
            "SELECT oxRevision, oxAuthConfDynamic, oxAuthConfWebKeys "
            "FROM `gluu` "
            "USE KEYS 'configuration_oxauth'")
        if not req.ok:
            return {}

        # An empty result set would make `[0]` raise IndexError; treat it
        # the same as a failed request.
        results = req.json()["results"]
        if not results:
            return {}

        config = results[0]
        if not config:
            return {}

        config.update({"id": "configuration_oxauth"})
        return config

    def modify_oxauth_config(self, id_, ox_rev, conf_dynamic, conf_webkeys):
        """Update revision, dynamic config, and web keys on document ``id_``.

        :param id_: Document key to update.
        :param ox_rev: New revision number.
        :param conf_dynamic: JSON-serializable dynamic configuration.
        :param conf_webkeys: JSON-serializable web keys configuration.
        :returns: ``True`` on success, ``False`` otherwise.
        """
        req = self.backend.exec_query(
            "UPDATE `gluu` "
            "USE KEYS '{0}' "
            "SET oxRevision={1}, oxAuthConfDynamic={2}, oxAuthConfWebKeys={3} "
            "RETURNING oxRevision".format(
                id_,
                ox_rev,
                json.dumps(conf_dynamic),
                json.dumps(conf_webkeys),
            ))

        # Collapse the `if not req.ok: return False / return True` ladder.
        return bool(req.ok)
class CouchbaseBackend:
    """Thin single-document CRUD helper over a :class:`CouchbaseClient`."""

    def __init__(self, manager):
        hosts = os.environ.get("GLUU_COUCHBASE_URL", "localhost")
        user = get_couchbase_user(manager)
        password = get_couchbase_password(manager)
        self.client = CouchbaseClient(hosts, user, password)

    def get_entry(self, key, filter_="", attrs=None, **kwargs):
        """Fetch a single document by ``key`` from ``kwargs["bucket"]``.

        ``filter_`` and ``attrs`` are accepted for interface compatibility
        with other backends but are unused here.

        :returns: An ``Entry`` or ``None`` when the request fails or the
            key does not exist.
        """
        bucket = kwargs.get("bucket")
        req = self.client.exec_query(
            "SELECT META().id, {0}.* FROM {0} USE KEYS '{1}'".format(bucket, key)
        )

        if not req.ok:
            return

        try:
            attrs = req.json()["results"][0]
            return Entry(attrs.pop("id"), attrs)
        except IndexError:
            # empty result set: key not found
            return

    def modify_entry(self, key, attrs=None, **kwargs):
        """SET (or UNSET when ``delete_attr`` is truthy) attributes on a doc.

        :returns: Tuple of ``(success, status_text)``.
        """
        attrs = attrs or {}
        bucket = kwargs.get("bucket")
        del_flag = kwargs.get("delete_attr", False)

        if del_flag:
            # UNSET only needs attribute names; iterating the dict yields keys.
            mod_kv = "UNSET {}".format(",".join(attrs))
        else:
            mod_kv = "SET {}".format(
                ",".join("{}={}".format(k, json.dumps(v)) for k, v in attrs.items())
            )

        query = "UPDATE {} USE KEYS '{}' {}".format(bucket, key, mod_kv)
        req = self.client.exec_query(query)
        if req.ok:
            resp = req.json()
            return bool(resp["status"] == "success"), resp["status"]
        return False, ""

    def add_entry(self, key, attrs=None, **kwargs):
        """INSERT a new document with the given key and attributes.

        :returns: Tuple of ``(success, status_text)``.
        """
        attrs = attrs or {}
        bucket = kwargs.get("bucket")

        query = 'INSERT INTO `%s` (KEY, VALUE) VALUES ("%s", %s);\n' % (bucket, key, json.dumps(attrs))
        req = self.client.exec_query(query)

        if req.ok:
            resp = req.json()
            return bool(resp["status"] == "success"), resp["status"]
        return False, ""
    def __init__(self, manager):
        """Connect as the superuser when available, else as the app user."""
        hostname = os.environ.get("GLUU_COUCHBASE_URL", "localhost")
        user = get_couchbase_superuser(manager) or get_couchbase_user(manager)

        try:
            password = get_couchbase_superuser_password(manager)
        except FileNotFoundError:
            # secret file absent: no superuser password provisioned
            password = ""
        if not password:
            password = get_couchbase_password(manager)

        self.client = CouchbaseClient(hostname, user, password)
        self.manager = manager
def wait_for_couchbase_conn(manager, **kwargs):
    """Wait for readiness/availability of Couchbase server based on connection status.

    :param manager: An instance of :class:`~pygluu.containerlib.manager._Manager`.
    """
    host = os.environ.get("GLUU_COUCHBASE_URL", "localhost")
    client = CouchbaseClient(
        host,
        get_couchbase_user(manager),
        get_couchbase_password(manager),
    )

    # listing buckets doubles as a connectivity probe
    resp = client.get_buckets()
    if not resp.ok:
        raise WaitError(f"Unable to connect to host in {host} list")
def wait_for_couchbase(manager, **kwargs):
    """Wait for readiness/availability of Couchbase server based on existing entry.

    :param manager: An instance of :class:`~pygluu.containerlib.manager._Manager`.
    """
    host = os.environ.get("GLUU_COUCHBASE_URL", "localhost")
    user = get_couchbase_user(manager)
    password = get_couchbase_password(manager)

    persistence_type = os.environ.get("GLUU_PERSISTENCE_TYPE", "couchbase")
    ldap_mapping = os.environ.get("GLUU_PERSISTENCE_LDAP_MAPPING", "default")
    bucket_prefix = os.environ.get("GLUU_COUCHBASE_BUCKET_PREFIX", "gluu")

    # Only the _base_ and _user_ buckets may hold initial data, and which
    # one does depends on the LDAP mapping.  When `hybrid` persistence keeps
    # the default mapping in LDAP, the _base_ bucket stays empty, so probe
    # the _user_ bucket instead.
    if persistence_type == "hybrid" and ldap_mapping == "default":
        bucket, key = f"{bucket_prefix}_user", "groups_60B7"
    else:
        bucket, key = bucket_prefix, "configuration_oxtrust"

    req = CouchbaseClient(host, user, password).exec_query(
        f"SELECT objectClass FROM {bucket} USE KEYS $key",
        key=key,
    )

    if not req.ok:
        # prefer the server-reported message; fall back to the HTTP reason
        try:
            err = json.loads(req.text)["errors"][0]["msg"]
        except (ValueError, KeyError, IndexError):
            err = req.reason
        raise WaitError(err)

    # request succeeded but the seed document is absent
    if not req.json()["results"]:
        raise WaitError(f"Missing document {key} in bucket {bucket}")
Example #7
0
 def __init__(self, host, user, password):
     """Store a CouchbaseClient built from the given connection info."""
     # single collaborator: every query goes through this client
     self.backend = CouchbaseClient(
         host,
         user,
         password,
     )
Example #8
0
def test_no_couchbase_hosts(client_prop):
    """Accessing any client property must fail when no host is configured."""
    from pygluu.containerlib.persistence.couchbase import CouchbaseClient

    cb_client = CouchbaseClient("", "admin", "password")
    with pytest.raises(ValueError):
        getattr(cb_client, client_prop)
 def __init__(self, manager):
     """Bind a CouchbaseClient using env-provided hosts and manager secrets."""
     self.client = CouchbaseClient(
         os.environ.get("GLUU_COUCHBASE_URL", "localhost"),
         get_couchbase_user(manager),
         get_couchbase_password(manager),
     )
 def __init__(self, manager):
     """Keep the manager and a CouchbaseClient bound to the configured host."""
     self.manager = manager
     self.client = CouchbaseClient(
         GLUU_COUCHBASE_URL,
         get_couchbase_user(manager),
         get_couchbase_password(manager),
     )
class CouchbaseBackend(object):
    """Provision a Couchbase cluster for Gluu.

    Creates buckets, builds N1QL indexes, and imports initial LDIF data.
    """

    def __init__(self, manager):
        hostname = GLUU_COUCHBASE_URL
        user = get_couchbase_user(manager)
        password = get_couchbase_password(manager)
        self.client = CouchbaseClient(hostname, user, password)
        self.manager = manager

    def create_buckets(self, bucket_mappings, bucket_type="couchbase"):
        """Create the buckets declared in ``bucket_mappings``.

        RAM per bucket is allocated proportionally to each mapping's
        ``mem_alloc`` share of the node's remaining quota.

        :param bucket_mappings: Mapping of name -> dict with at least
            ``bucket`` and ``mem_alloc`` keys.
        :param bucket_type: Couchbase bucket type passed to ``add_bucket``.
        :raises RuntimeError: When cluster system info cannot be fetched.
        """
        sys_info = self.client.get_system_info()

        if not sys_info:
            raise RuntimeError(
                "Unable to get system info from Couchbase; aborting ...")

        ram_info = sys_info["storageTotals"]["ram"]

        total_mem = (ram_info['quotaTotalPerNode'] -
                     ram_info['quotaUsedPerNode']) / (1024 * 1024)
        # the minimum memory is a sum of required buckets + minimum mem for `gluu` bucket
        min_mem = sum(
            [value["mem_alloc"] for value in bucket_mappings.values()]) + 100

        logger.info(
            "Memory size per node for Couchbase buckets was determined as {} MB"
            .format(total_mem))
        logger.info(
            "Minimum memory size per node for Couchbase buckets was determined as {} MB"
            .format(min_mem))

        if total_mem < min_mem:
            logger.error(
                "Available quota on couchbase node is less than {} MB".format(
                    min_mem))

        # always create `gluu` bucket even when `default` mapping stored in LDAP
        if GLUU_PERSISTENCE_TYPE == "hybrid" and GLUU_PERSISTENCE_LDAP_MAPPING == "default":
            memsize = 100

            logger.info(
                "Creating bucket {0} with type {1} and RAM size {2}".format(
                    "gluu", bucket_type, memsize))
            req = self.client.add_bucket("gluu", memsize, bucket_type)
            if not req.ok:
                # logger.warn is a deprecated alias of logger.warning
                logger.warning("Failed to create bucket {}; reason={}".format(
                    "gluu", req.text))

        req = self.client.get_buckets()
        if req.ok:
            remote_buckets = tuple(bckt["name"] for bckt in req.json())
        else:
            remote_buckets = tuple()

        # NOTE: dict.iteritems() is Python 2 only and crashes on Python 3;
        # .values() works on both and the key was unused anyway.
        for mapping in bucket_mappings.values():
            if mapping["bucket"] in remote_buckets:
                continue

            memsize = int((mapping["mem_alloc"] / float(min_mem)) * total_mem)

            logger.info(
                "Creating bucket {0} with type {1} and RAM size {2}".format(
                    mapping["bucket"], bucket_type, memsize))
            req = self.client.add_bucket(mapping["bucket"], memsize,
                                         bucket_type)
            if not req.ok:
                logger.warning("Failed to create bucket {}; reason={}".format(
                    mapping["bucket"], req.text))

    def create_indexes(self, bucket_mappings):
        """Generate and execute index-creation N1QL for each mapped bucket.

        Index definitions come from ``/app/static/couchbase_index.json``;
        the generated statements are spooled into a per-bucket ``.n1ql``
        file and then executed line by line.
        """
        buckets = [
            mapping["bucket"] for mapping in bucket_mappings.values()
        ]

        with open("/app/static/couchbase_index.json") as f:
            indexes = json.loads(f.read())

        for bucket in buckets:
            if bucket not in indexes:
                continue

            query_file = "/app/tmp/index_{}.n1ql".format(bucket)

            logger.info(
                "Running Couchbase index creation for {} bucket (if not exist)"
                .format(bucket))

            with open(query_file, "w") as f:
                index_list = indexes.get(bucket, {})
                index_names = []

                for index in index_list.get("attributes", []):
                    if '(' in ''.join(index):
                        # functional index: derive a safe name from the expression
                        attr_ = index[0]
                        index_name_ = index[0].replace('(', '_').replace(
                            ')', '_').replace('`', '').lower()
                        if index_name_.endswith('_'):
                            index_name_ = index_name_[:-1]
                        index_name = 'def_{0}_{1}'.format(bucket, index_name_)
                    else:
                        attr_ = ','.join(['`{}`'.format(a) for a in index])
                        index_name = "def_{0}_{1}".format(
                            bucket, '_'.join(index))

                    f.write(
                        'CREATE INDEX %s ON `%s`(%s) USING GSI WITH {"defer_build":true};\n'
                        % (index_name, bucket, attr_))
                    index_names.append(index_name)

                if index_names:
                    # deferred indexes must be built explicitly afterwards
                    f.write('BUILD INDEX ON `%s` (%s) USING GSI;\n' %
                            (bucket, ', '.join(index_names)))

                sic = 1
                for attribs, wherec in index_list.get("static", []):
                    attrquoted = []

                    for a in attribs:
                        if '(' not in a:
                            attrquoted.append('`{}`'.format(a))
                        else:
                            attrquoted.append(a)
                    attrquoteds = ', '.join(attrquoted)

                    f.write(
                        'CREATE INDEX `{0}_static_{1:02d}` ON `{0}`({2}) WHERE ({3})\n'
                        .format(bucket, sic, attrquoteds, wherec))
                    sic += 1

            # exec query
            with open(query_file) as f:
                for line in f:
                    query = line.strip()
                    if not query:
                        continue

                    req = self.client.exec_query(query)
                    if not req.ok:
                        # the following code should be ignored
                        # - 4300: index already exists
                        # - 5000: index already built
                        error = req.json()["errors"][0]
                        if error["code"] in (4300, 5000):
                            continue
                        logger.warning(
                            "Failed to execute query, reason={}".format(
                                error["msg"]))

    def import_ldif(self, bucket_mappings):
        """Render LDIF templates and import them as N1QL INSERT statements."""
        ctx = prepare_template_ctx(self.manager)
        attr_processor = AttrProcessor()

        for mapping in bucket_mappings.values():
            for file_ in mapping["files"]:
                src = "/app/templates/ldif/{}".format(file_)
                dst = "/app/tmp/{}".format(file_)
                render_ldif(src, dst, ctx)

                query_file = "/app/tmp/{}.n1ql".format(file_)

                # open the rendered LDIF in a context manager so the file
                # object is closed instead of leaked
                with open(dst) as ldif_fd:
                    parser = LDIFParser(ldif_fd)

                    with open(query_file, "a+") as f:
                        for dn, entry in parser.parse():
                            if len(entry) <= 2:
                                continue

                            key = get_key_from(dn)
                            entry["dn"] = [dn]
                            entry = transform_entry(entry, attr_processor)
                            data = json.dumps(entry)
                            # using INSERT will cause duplication error, but the data is left intact
                            query = 'INSERT INTO `%s` (KEY, VALUE) VALUES ("%s", %s);\n' % (
                                mapping["bucket"], key, data)
                            f.write(query)

                # exec query
                logger.info(
                    "Importing {} file into {} bucket (if needed)".format(
                        file_, mapping["bucket"]))
                with open(query_file) as f:
                    for line in f:
                        query = line.strip()
                        if not query:
                            continue

                        req = self.client.exec_query(query)
                        if not req.ok:
                            logger.warning(
                                "Failed to execute query, reason={}".format(
                                    req.json()))

    def initialize(self):
        """Run the full provisioning flow unless data already exists."""
        def is_initialized():
            # whether the seed document is already present in the target bucket
            persistence_type = os.environ.get("GLUU_PERSISTENCE_TYPE",
                                              "couchbase")
            ldap_mapping = os.environ.get("GLUU_PERSISTENCE_LDAP_MAPPING",
                                          "default")

            # only `gluu` and `gluu_user` buckets that may have initial data;
            # these data also affected by LDAP mapping selection;
            # by default we will choose the `gluu` bucket
            bucket, key = "gluu", "configuration_oxtrust"

            # if `hybrid` is selected and default mapping is stored in LDAP,
            # the `gluu` bucket won't have data, hence we check the `gluu_user` instead
            if persistence_type == "hybrid" and ldap_mapping == "default":
                bucket, key = "gluu_user", "groups_60B7"

            query = "SELECT objectClass FROM {0} USE KEYS '{1}'".format(
                bucket, key)

            req = self.client.exec_query(query)
            if req.ok:
                data = req.json()
                return bool(data["results"])
            return False

        should_skip = as_boolean(
            os.environ.get("GLUU_PERSISTENCE_SKIP_EXISTING", True))
        if should_skip and is_initialized():
            logger.info("Couchbase backend already initialized")
            return

        bucket_mappings = get_bucket_mappings()

        # brief pauses give Couchbase time to settle between steps
        time.sleep(5)
        self.create_buckets(bucket_mappings)

        time.sleep(5)
        self.create_indexes(bucket_mappings)

        time.sleep(5)
        self.import_ldif(bucket_mappings)