Code Example #1
import os

# CouchbaseClient, get_couchbase_user, get_couchbase_password, and WaitError
# come from pygluu.containerlib modules; their import lines are omitted in the
# original example
def wait_for_couchbase_conn(manager, **kwargs):
    """Wait until the Couchbase server is ready, based on connection status.

    :param manager: An instance of :class:`~pygluu.containerlib.manager._Manager`.
    """
    host = os.environ.get("GLUU_COUCHBASE_URL", "localhost")
    user = get_couchbase_user(manager)
    password = get_couchbase_password(manager)

    cb_client = CouchbaseClient(host, user, password)
    req = cb_client.get_buckets()

    if not req.ok:
        raise WaitError(f"Unable to connect to any Couchbase host in {host}")
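
For context, a minimal sketch of how this helper might be invoked, assuming pygluu.containerlib's usual get_manager() factory; the retry loop is illustrative, not the library's own wait machinery:

import time

from pygluu.containerlib import get_manager

manager = get_manager()

# retry a few times before giving up; interval and attempt count are arbitrary
for attempt in range(5):
    try:
        wait_for_couchbase_conn(manager)
        break
    except WaitError as exc:
        print(f"attempt {attempt + 1} failed: {exc}; retrying in 10 seconds")
        time.sleep(10)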
Code Example #2
import json
import os
import time

# logger, CouchbaseClient, the GLUU_* constants, and the various helper
# functions are defined at module level in the original source; their
# definitions are omitted in this example
class CouchbaseBackend:
    def __init__(self, manager):
        # GLUU_COUCHBASE_URL is a module-level constant read from the environment
        hostname = GLUU_COUCHBASE_URL
        user = get_couchbase_user(manager)
        password = get_couchbase_password(manager)
        self.client = CouchbaseClient(hostname, user, password)
        self.manager = manager

    def create_buckets(self, bucket_mappings, bucket_type="couchbase"):
        sys_info = self.client.get_system_info()

        if not sys_info:
            raise RuntimeError(
                "Unable to get system info from Couchbase; aborting ...")

        ram_info = sys_info["storageTotals"]["ram"]

        # free RAM quota per node, converted from bytes to MB
        total_mem = (ram_info['quotaTotalPerNode'] -
                     ram_info['quotaUsedPerNode']) / (1024 * 1024)
        # the minimum required memory is the sum of all bucket allocations
        # plus 100 MB reserved for the `gluu` bucket
        min_mem = sum(
            value["mem_alloc"] for value in bucket_mappings.values()) + 100

        logger.info(
            "Memory size per node for Couchbase buckets was determined as {} MB"
            .format(total_mem))
        logger.info(
            "Minimum memory size per node for Couchbase buckets was determined as {} MB"
            .format(min_mem))

        if total_mem < min_mem:
            logger.error(
                "Available quota on Couchbase node is less than {} MB".format(
                    min_mem))

        # always create the `gluu` bucket, even when the `default` mapping is
        # stored in LDAP (GLUU_PERSISTENCE_TYPE and GLUU_PERSISTENCE_LDAP_MAPPING
        # are module-level constants read from the environment)
        if GLUU_PERSISTENCE_TYPE == "hybrid" and GLUU_PERSISTENCE_LDAP_MAPPING == "default":
            memsize = 100

            logger.info(
                "Creating bucket {0} with type {1} and RAM size {2}".format(
                    "gluu", bucket_type, memsize))
            req = self.client.add_bucket("gluu", memsize, bucket_type)
            if not req.ok:
                logger.warning("Failed to create bucket {}; reason={}".format(
                    "gluu", req.text))

        req = self.client.get_buckets()
        if req.ok:
            remote_buckets = tuple(bckt["name"] for bckt in req.json())
        else:
            remote_buckets = tuple()

        for mapping in bucket_mappings.values():
            if mapping["bucket"] in remote_buckets:
                continue

            # scale each bucket's share of min_mem to the available quota
            memsize = int((mapping["mem_alloc"] / min_mem) * total_mem)

            logger.info(
                "Creating bucket {0} with type {1} and RAM size {2}".format(
                    mapping["bucket"], bucket_type, memsize))
            req = self.client.add_bucket(mapping["bucket"], memsize,
                                         bucket_type)
            if not req.ok:
                logger.warning("Failed to create bucket {}; reason={}".format(
                    mapping["bucket"], req.text))

    def create_indexes(self, bucket_mappings):
        buckets = [
            mapping["bucket"] for mapping in bucket_mappings.values()
        ]

        with open("/app/static/couchbase_index.json") as f:
            indexes = json.loads(f.read())
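
        # illustrative shape of couchbase_index.json, inferred from the parsing
        # code below (bucket and attribute names are hypothetical):
        #
        # {
        #   "gluu": {
        #     "attributes": [["uid"], ["mail", "displayName"], ["LOWER(`uid`)"]],
        #     "static": [[["del", "exp"], "del = TRUE"]]
        #   }
        # }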

        for bucket in buckets:
            if bucket not in indexes:
                continue

            query_file = "/app/tmp/index_{}.n1ql".format(bucket)

            logger.info(
                "Running Couchbase index creation for {} bucket (if not already created)"
                .format(bucket))

            with open(query_file, "w") as f:
                index_list = indexes.get(bucket, {})
                index_names = []

                for index in index_list.get("attributes", []):
                    if '(' in ''.join(index):
                        # functional index expression, e.g. LOWER(`uid`);
                        # derive a safe index name from the expression
                        attr_ = index[0]
                        index_name_ = index[0].replace('(', '_').replace(
                            ')', '_').replace('`', '').lower()
                        if index_name_.endswith('_'):
                            index_name_ = index_name_[:-1]
                        index_name = 'def_{0}_{1}'.format(bucket, index_name_)
                    else:
                        # plain attribute list: backtick-quote each name
                        attr_ = ','.join(['`{}`'.format(a) for a in index])
                        index_name = "def_{0}_{1}".format(
                            bucket, '_'.join(index))

                    f.write(
                        'CREATE INDEX %s ON `%s`(%s) USING GSI WITH {"defer_build":true};\n'
                        % (index_name, bucket, attr_))
                    index_names.append(index_name)

                if index_names:
                    f.write('BUILD INDEX ON `%s` (%s) USING GSI;\n' %
                            (bucket, ', '.join(index_names)))
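
                # for illustration, with bucket "gluu" and a single attribute
                # "uid", the two writes above would produce:
                #   CREATE INDEX def_gluu_uid ON `gluu`(`uid`) USING GSI WITH {"defer_build":true};
                #   BUILD INDEX ON `gluu` (def_gluu_uid) USING GSI;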

                sic = 1  # counter used to number the static indexes
                for attribs, wherec in index_list.get("static", []):
                    attrquoted = []

                    # backtick-quote plain attribute names; leave functional
                    # expressions as-is
                    for a in attribs:
                        if '(' not in a:
                            attrquoted.append('`{}`'.format(a))
                        else:
                            attrquoted.append(a)
                    attrquoteds = ', '.join(attrquoted)

                    f.write(
                        'CREATE INDEX `{0}_static_{1:02d}` ON `{0}`({2}) WHERE ({3});\n'
                        .format(bucket, sic, attrquoteds, wherec))
                    sic += 1

            # execute the generated queries, one per line
            with open(query_file) as f:
                for line in f:
                    query = line.strip()
                    if not query:
                        continue

                    req = self.client.exec_query(query)
                    if not req.ok:
                        # the following error codes can safely be ignored:
                        # - 4300: index already exists
                        # - 5000: index already built
                        error = req.json()["errors"][0]
                        if error["code"] in (4300, 5000):
                            continue
                        logger.warning(
                            "Failed to execute query, reason={}".format(
                                error["msg"]))

    def import_ldif(self, bucket_mappings):
        ctx = prepare_template_ctx(self.manager)
        attr_processor = AttrProcessor()

        for mapping in bucket_mappings.values():
            for file_ in mapping["files"]:
                src = "/app/templates/ldif/{}".format(file_)
                dst = "/app/tmp/{}".format(file_)
                render_ldif(src, dst, ctx)

                query_file = "/app/tmp/{}.n1ql".format(file_)

                # keep the LDIF file open while the parser lazily consumes it
                # (opened in binary mode, as LDIFParser expects bytes), and
                # close both files when done
                with open(dst, "rb") as src_fd, open(query_file, "a+") as f:
                    parser = LDIFParser(src_fd)
                    for dn, entry in parser.parse():
                        # skip entries that carry too few attributes
                        if len(entry) <= 2:
                            continue

                        key = get_key_from(dn)
                        entry["dn"] = [dn]
                        entry = transform_entry(entry, attr_processor)
                        data = json.dumps(entry)
                        # INSERT (unlike UPSERT) raises a duplication error if
                        # the key already exists, leaving existing data intact
                        query = 'INSERT INTO `%s` (KEY, VALUE) VALUES ("%s", %s);\n' % (
                            mapping["bucket"], key, data)
                        f.write(query)
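                        # illustrative result (key and document are hypothetical):
                        #   INSERT INTO `gluu` (KEY, VALUE) VALUES ("configuration", {"dn": ["o=gluu"]});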

                # execute the generated queries, one per line
                logger.info(
                    "Importing {} file into {} bucket (if needed)".format(
                        file_, mapping["bucket"]))
                with open(query_file) as f:
                    for line in f:
                        query = line.strip()
                        if not query:
                            continue

                        req = self.client.exec_query(query)
                        if not req.ok:
                            logger.warning(
                                "Failed to execute query, reason={}".format(
                                    req.json()))

    def initialize(self):
        def is_initialized():
            persistence_type = os.environ.get("GLUU_PERSISTENCE_TYPE",
                                              "couchbase")
            ldap_mapping = os.environ.get("GLUU_PERSISTENCE_LDAP_MAPPING",
                                          "default")

            # only the `gluu` and `gluu_user` buckets may contain initial data;
            # which one does is also affected by the LDAP mapping selection;
            # by default, check the `gluu` bucket
            bucket, key = "gluu", "configuration_oxtrust"

            # if `hybrid` persistence is selected and the default mapping is stored
            # in LDAP, the `gluu` bucket won't have data, so check `gluu_user` instead
            if persistence_type == "hybrid" and ldap_mapping == "default":
                bucket, key = "gluu_user", "groups_60B7"

            query = "SELECT objectClass FROM {0} USE KEYS '{1}'".format(
                bucket, key)

            req = self.client.exec_query(query)
            if req.ok:
                data = req.json()
                return bool(data["results"])
            return False

        should_skip = as_boolean(
            os.environ.get("GLUU_PERSISTENCE_SKIP_EXISTING", True))
        if should_skip and is_initialized():
            logger.info("Couchbase backend already initialized")
            return

        bucket_mappings = get_bucket_mappings()

        # pause briefly between each provisioning step
        time.sleep(5)
        self.create_buckets(bucket_mappings)

        time.sleep(5)
        self.create_indexes(bucket_mappings)

        time.sleep(5)
        self.import_ldif(bucket_mappings)
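
Putting the pieces together, a minimal sketch of driving the backend end to end, again assuming pygluu.containerlib's get_manager() factory and the wait helper from Code Example #1:

from pygluu.containerlib import get_manager

manager = get_manager()

# fail fast if Couchbase is unreachable (see Code Example #1)
wait_for_couchbase_conn(manager)

backend = CouchbaseBackend(manager)
backend.initialize()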