Example #1
    def _add_resource_group_templates(self, location):
        templates = dictionary_utils.get_dictionary_element(
            self._resources, RESOURCE_GROUP_TEMPLATE)
        self._add_named_elements(RESOURCE_GROUP_TEMPLATE, templates, location)
        return
def get_map_element_type(schema_map):
    additional = dictionary_utils.get_dictionary_element(schema_map, "additionalProperties")
    return get_type(additional)
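All of these examples rely on dictionary_utils.get_dictionary_element returning something safe to iterate or index when the requested key is absent. A minimal sketch of that contract, assuming (not confirmed here) that the helper falls back to an empty dictionary:

def get_dictionary_element(dictionary, element_name):
    # Sketch only: return the named element when it is a dictionary,
    # otherwise an empty dictionary so callers can iterate without None checks.
    if dictionary is not None and element_name in dictionary:
        value = dictionary[element_name]
        if isinstance(value, dict):
            return value
    return {}

# With this behavior, get_map_element_type() above is safe to call even when
# the schema map has no "additionalProperties" entry.
empty = get_dictionary_element({'type': 'object'}, 'additionalProperties')   # {}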
Example #3
    def _create_named_subtype_mbeans(self,
                                     type_name,
                                     model_nodes,
                                     base_location,
                                     log_created=False):
        """
        Create the MBeans for a single security provider type, such as "AuthenticationProvider".
        These are the only named subtype MBeans within security configuration, so no further checks are required.
        :param type_name: the model folder type
        :param model_nodes: the model dictionary of the specified model folder type
        :param base_location: the base location object to use to create the MBeans
        :param log_created: whether to log creation at the INFO level; by default it is logged at the FINE level
        :raises: CreateException: if an error occurs
        """
        _method_name = '_create_named_subtype_mbeans'

        self.logger.entering(type_name,
                             str(base_location),
                             log_created,
                             class_name=self.__class_name,
                             method_name=_method_name)

        if not self._is_type_valid(base_location, type_name):
            return

        # some providers may be skipped
        if not self._check_provider_type(type_name, model_nodes):
            return

        location = LocationContext(base_location).append_location(type_name)
        self._process_flattened_folder(location)

        # For create, delete the existing nodes and re-add them, in the order found in the model, in the loop below
        self._delete_existing_providers(location)

        if model_nodes is None or len(model_nodes) == 0:
            return

        token_name = self.alias_helper.get_name_token(location)
        create_path = self.alias_helper.get_wlst_create_path(location)
        list_path = self.alias_helper.get_wlst_list_path(location)
        existing_folder_names = self._get_existing_folders(list_path)
        known_providers = self.alias_helper.get_model_subfolder_names(location)
        # whether custom (non-alias) providers are allowed at this location; used as a boolean below
        allow_custom = self.alias_helper.is_custom_folder_allowed(location)

        for model_name in model_nodes:
            model_node = model_nodes[model_name]

            if model_node is None:
                # The node is empty so nothing to do... move to the next named node.
                continue

            if len(model_node) != 1:
                # there should be exactly one type folder under the name folder
                ex = exception_helper.create_exception(self._exception_type,
                                                       'WLSDPLY-12117',
                                                       type_name, model_name,
                                                       len(model_node))
                self.logger.throwing(ex,
                                     class_name=self.__class_name,
                                     method_name=_method_name)
                raise ex

            model_type_subfolder_name = list(model_node.keys())[0]
            child_nodes = dictionary_utils.get_dictionary_element(
                model_node, model_type_subfolder_name)

            # custom providers require special processing; they are not described in the alias framework
            if allow_custom and (model_type_subfolder_name
                                 not in known_providers):
                self.custom_folder_helper.update_security_folder(
                    base_location, type_name, model_type_subfolder_name,
                    model_name, child_nodes)
                continue

            # for a known provider, process using aliases
            prov_location = LocationContext(location)
            name = self.wlst_helper.get_quoted_name_for_wlst(model_name)
            if token_name is not None:
                prov_location.add_name_token(token_name, name)

            wlst_base_provider_type, wlst_name = self.alias_helper.get_wlst_mbean_type_and_name(
                prov_location)

            prov_location.append_location(model_type_subfolder_name)
            wlst_type = self.alias_helper.get_wlst_mbean_type(prov_location)

            if wlst_name not in existing_folder_names:
                if log_created:
                    self.logger.info('WLSDPLY-12118',
                                     type_name,
                                     model_type_subfolder_name,
                                     name,
                                     create_path,
                                     class_name=self.__class_name,
                                     method_name=_method_name)
                else:
                    self.logger.fine('WLSDPLY-12118',
                                     type_name,
                                     model_type_subfolder_name,
                                     name,
                                     create_path,
                                     class_name=self.__class_name,
                                     method_name=_method_name)
                self.wlst_helper.cd(create_path)
                self.wlst_helper.create(wlst_name, wlst_type,
                                        wlst_base_provider_type)
            else:
                if log_created:
                    self.logger.info('WLSDPLY-12119',
                                     type_name,
                                     model_type_subfolder_name,
                                     name,
                                     create_path,
                                     class_name=self.__class_name,
                                     method_name=_method_name)
                else:
                    self.logger.fine('WLSDPLY-12119',
                                     type_name,
                                     model_type_subfolder_name,
                                     name,
                                     create_path,
                                     class_name=self.__class_name,
                                     method_name=_method_name)

            attribute_path = self.alias_helper.get_wlst_attributes_path(
                prov_location)
            self.wlst_helper.cd(attribute_path)

            self.logger.finest(
                'WLSDPLY-12111',
                self.alias_helper.get_model_folder_path(prov_location),
                self.wlst_helper.get_pwd(),
                class_name=self.__class_name,
                method_name=_method_name)
            self._set_attributes(prov_location, child_nodes)
            self._create_subfolders(prov_location, child_nodes)

        self.logger.exiting(class_name=self.__class_name,
                            method_name=_method_name)
        return
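For context, the length check in the loop above expects each named provider node to contain exactly one type subfolder. A hypothetical model fragment that _create_named_subtype_mbeans would accept; all provider and attribute names below are illustrative, not taken from the source:

# Illustrative model_nodes for type_name = 'AuthenticationProvider'
model_nodes = {
    'myAuthenticator': {                       # model_name
        'DefaultAuthenticator': {              # the single type subfolder
            'ControlFlag': 'REQUIRED'
        }
    },
    'myCustomProvider': {
        'com.example.CustomAuthenticator': {   # not in known_providers -> custom folder path
            'UserNameAttribute': 'cn'
        }
    }
}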
    def _update_server(self, name, dictionary, config_location):
        _method_name = '_update_server'

        # these imports are local, since they are only present in JRF environments.
        # this method is only called after that check has been made.
        from oracle.core.ojdl.weblogic.ODLConfiguration import CONFIG_DIR
        from oracle.core.ojdl.weblogic.ODLConfiguration import CONFIG_FILE
        from oracle.core.ojdl.logging.config import LoggingConfigurationDocument

        config_dir = File(self.model_context.get_domain_home(), CONFIG_DIR)
        server_dir = File(config_dir, name)
        config_file = File(server_dir, CONFIG_FILE)
        log_template_dir = config_dir.getParentFile()

        try:
            if config_file.exists():
                source_file = config_file
                FileUtils.validateWritableFile(config_file.getPath())
            else:
                # for dynamic servers, the logging config does not exist until the server is started.
                # read from the template file, and verify that the server directory is present and writable.
                source_file = File(log_template_dir, LOGGING_TEMPLATE_FILE)
                FileUtils.validateExistingFile(source_file)
                if not server_dir.exists() and not server_dir.mkdirs():
                    ex = exception_helper.create_deploy_exception(
                        'WLSDPLY-19710', server_dir)
                    self.logger.throwing(ex,
                                         class_name=self.__class_name,
                                         method_name=_method_name)
                    raise ex
                FileUtils.validateWritableDirectory(server_dir.getPath())

            document = LoggingConfigurationDocument(
                FileInputStream(source_file))

            # configure AddJvmNumber
            add_jvm_number = dictionary_utils.get_element(
                dictionary, _ADD_JVM_NUMBER)
            if add_jvm_number is not None:
                document.setAddJvmNumber(
                    alias_utils.convert_boolean(add_jvm_number))

            # configure HandlerDefaults
            handler_defaults = dictionary_utils.get_dictionary_element(
                dictionary, _HANDLER_DEFAULTS)
            if handler_defaults is not None:
                for key in handler_defaults:
                    value = handler_defaults[key]
                    document.setHandlerDefault(key, _get_property_text(value))

            # configure Handlers
            # do these before loggers, in case new handlers are assigned to loggers
            existing_handler_names = document.getHandlerNames()
            handlers = dictionary_utils.get_dictionary_element(
                dictionary, _HANDLER)
            if handlers is not None:
                for handler_name in handlers:
                    handler = handlers[handler_name]
                    self._configure_handler(handler_name, handler, document,
                                            existing_handler_names,
                                            config_location)

            # configure Loggers
            existing_logger_names = document.getLoggerNames()
            loggers = dictionary_utils.get_dictionary_element(
                dictionary, _LOGGER)
            if loggers is not None:
                for logger_name in loggers:
                    logger = loggers[logger_name]
                    self._configure_logger(logger_name, logger, document,
                                           existing_logger_names,
                                           config_location)

            document.writeDocument(FileOutputStream(config_file))

        except (ParserConfigurationException, SAXException, IOException,
                IllegalArgumentException), ex:
            self.logger.severe('WLSDPLY-19707',
                               name,
                               ex.getLocalizedMessage(),
                               class_name=self.__class_name,
                               method_name=_method_name)
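The dictionary handed to _update_server mirrors the four sections handled above. A hypothetical input fragment; the attribute names and values are illustrative, and the top-level keys assume the _ADD_JVM_NUMBER, _HANDLER_DEFAULTS, _HANDLER, and _LOGGER constants resolve to model keys spelled this way:

# Illustrative ODL logging configuration for one server
odl_config = {
    'AddJvmNumber': 'true',
    'HandlerDefaults': {'maxFileSize': '10485760'},
    'Handler': {
        'odl-handler': {'Level': 'WARNING:32'}
    },
    'Logger': {
        'oracle.sysman': {'Level': 'NOTIFICATION:16'}
    }
}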
Example #5
    def target_server_groups_to_servers(self, server_groups_to_target):
        """
        Target the server groups to the servers.
        :param server_groups_to_target: the list of server groups to target
        :raises: BundleAwareException of the specified type: if an error occurs
        """
        _method_name = 'target_server_groups_to_servers'

        self.logger.entering(server_groups_to_target, class_name=self.__class_name, method_name=_method_name)
        if len(server_groups_to_target) == 0:
            return list()

        location = LocationContext()
        root_path = self.aliases.get_wlst_attributes_path(location)
        self.wlst_helper.cd(root_path)

        # We need to get the effective list of servers for the domain.  Since any servers
        # referenced in the model have already been created but the templates may have
        # defined new servers not listed in the model, get the list from WLST.
        server_names = self._get_existing_server_names()
        # Get the clusters and their members
        cluster_map = self._get_clusters_and_members_map()
        # Get any limits that may have been defined in the model
        domain_info = self.model.get_model_domain_info()
        server_group_targeting_limits = \
            dictionary_utils.get_dictionary_element(domain_info, SERVER_GROUP_TARGETING_LIMITS)
        if len(server_group_targeting_limits) > 0:
            server_group_targeting_limits = \
                self._get_server_group_targeting_limits(server_group_targeting_limits, cluster_map)

        self.logger.finer('WLSDPLY-12240', str(server_group_targeting_limits),
                          class_name=self.__class_name, method_name=_method_name)

        # Get the map of server names to server groups to target
        server_to_server_groups_map =\
            self._get_server_to_server_groups_map(self._admin_server_name,
                                                  server_names,
                                                  server_groups_to_target,
                                                  server_group_targeting_limits)  # type: dict
        self.logger.finer('WLSDPLY-12242', str(server_to_server_groups_map), class_name=self.__class_name,
                          method_name=_method_name)

        final_assignment_map = dict()
        # Target servers and dynamic clusters to the server group resources
        if len(server_names) > 0:
            for server, server_groups in server_to_server_groups_map.iteritems():
                if len(server_groups) > 0:
                    if server in server_names:
                        final_assignment_map[server] = server_groups

        #
        # Either the domain has not targeted the server groups to configured managed servers, or the
        # domain has no configured managed servers but does have user server groups. The resources for
        # the user server groups must be targeted before the domain write/update or the write/update
        # will fail. Thus, assign the user server groups to the admin server.
        #
        # Because of the interaction of the working context in the different wlst helpers, the dynamic
        # clusters will be applied to the resources separately, after the domain write/update.
        #
        # (From original blurb)
        # This is really a best effort attempt.  It works for JRF domains but it is certainly possible
        # that it may cause problems with other custom domain types.  Of course, creating a domain with
        # no managed servers is not a primary use case of this tool, so do it and hope for the best...
        #
        # (New comment)
        # Now that dynamic clusters are handled, if the targeting is to dynamic clusters only, the
        # server groups set on the admin server will get through the domain write/update, and the
        # applyJRF call for the dynamic cluster should, in theory, unset the AdminServer on the user
        # server groups. This works with JRF-type domains.

        if len(server_groups_to_target) > 0:
            if len(final_assignment_map) == 0:
                # Quick fix for the case where server groups are not targeted because no configured
                # managed servers exist in the domain
                final_assignment_map[server_names[0]] = server_groups_to_target
            else:
                # If one or more server groups are not targeted in the assignments, log them to stdout
                no_targets = [server_target for server_target in server_groups_to_target if server_target not in
                              [server_target for row in final_assignment_map.itervalues() for
                               server_target in server_groups_to_target if server_target in row]]
                if len(no_targets) > 0:
                    self.logger.info('WLSDPLY-12248', no_targets,
                                     class_name=self.__class_name, method_name=_method_name)

        self.logger.exiting(result=str(final_assignment_map), class_name=self.__class_name, method_name=_method_name)
        return final_assignment_map
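The nested comprehension that computes no_targets above is hard to follow. An equivalent, more explicit formulation (a sketch, not taken from the source) performs the same membership test in two steps:

def find_untargeted_groups(server_groups_to_target, final_assignment_map):
    # Collect every server group that appears in some assignment row, then
    # report the requested groups that were never assigned to any server.
    assigned_groups = set()
    for row in final_assignment_map.values():
        assigned_groups.update(row)
    return [group for group in server_groups_to_target if group not in assigned_groups]

# find_untargeted_groups(['JRF-MAN-SVR', 'WSMPM-MAN-SVR'], {'ms1': ['JRF-MAN-SVR']})
# returns ['WSMPM-MAN-SVR']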
    def add_jms_system_resources(self, parent_dict, location):
        system_resources = dictionary_utils.get_dictionary_element(parent_dict, JMS_SYSTEM_RESOURCE)
        self._add_named_elements(JMS_SYSTEM_RESOURCE, system_resources, location)
        return
Example #7
    def get_step_names(self):
        return dictionary_utils.get_dictionary_element(self._stage_dict, TestDefStage.STEP_NAMES)
Example #8
    def _add_results(self, change_paths, is_delete=False):
        """
        Update the differences in the final model dictionary with the changes
        :param change_paths: Array of changes in delimited format
        :param is_delete: flag indicating to delete paths
        """
        parent_index = -2
        for change_path in change_paths:
            # change_path is the keys of changes in the piped format, such as:
            # resources|JDBCSystemResource|Generic2|JdbcResource|JDBCConnectionPoolParams|TestConnectionsOnReserve
            location, attribute_name, property_key = self._parse_change_path(
                change_path)
            is_folder_path = attribute_name is None

            if is_delete and not is_folder_path:
                # Skip adding if it is a delete of an attribute
                self.compare_msgs.add(('WLSDPLY-05701', change_path))
                continue

            # splitted is a list containing the next token and, if any remain, a delimited string of the remaining tokens
            splitted = change_path.split(PATH_TOKEN, 1)

            # change_tree will be a nested dictionary containing the change path parent elements.
            # change_tokens is a list of parent tokens in change_tree.
            change_tree = PyOrderedDict()
            change_tokens = []

            while len(splitted) > 1:
                tmp_folder = PyOrderedDict()
                tmp_folder[splitted[0]] = PyOrderedDict()
                if len(change_tree) > 0:
                    # traverse to the leaf folder
                    change_folder = change_tree
                    for token in change_tokens:
                        change_folder = change_folder[token]
                    change_folder[splitted[0]] = PyOrderedDict()
                    change_tokens.append(splitted[0])
                else:
                    change_tree = tmp_folder
                    change_tokens.append(splitted[0])
                splitted = splitted[1].split(PATH_TOKEN, 1)

            # key is the last name in the change path
            key = splitted[0]

            # find the specified folder in the change tree and in the current and previous models
            change_folder = change_tree
            current_folder = self.current_dict
            previous_folder = self.past_dict
            for token in change_tokens:
                change_folder = change_folder[token]
                current_folder = current_folder[token]
                previous_folder = dictionary_utils.get_dictionary_element(
                    previous_folder, token)

            # set the value in the change folder if present.
            # merge new and previous values if relevant.
            # add a comment if the previous value was found.
            if current_folder:
                current_value = current_folder[key]
                previous_value = dictionary_utils.get_element(
                    previous_folder, key)
                change_value, comment = self._get_change_info(
                    current_value, previous_value, location, attribute_name,
                    property_key)

                if comment:
                    # make comment key unique, key will not appear in output
                    comment_key = COMMENT_MATCH + comment
                    change_folder[comment_key] = comment
                change_folder[key] = change_value
            else:
                change_folder[key] = None

            # merge the change tree into the final model
            self.merge_dictionaries(self.final_changed_model, change_tree)

            # if it is a deletion then go back and update with '!'

            if is_delete:
                split_delete = change_path.split(PATH_TOKEN)
                # allowable_delete_length = len(allowable_delete.split(PATH_TOKEN))
                split_delete_length = len(split_delete)
                if is_folder_path:
                    app_key = split_delete[split_delete_length - 1]
                    parent_key = split_delete[parent_index]
                    debug("DEBUG: deleting folder %s from the model: key %s ",
                          change_path, app_key)
                    pointer_dict = self.final_changed_model
                    for k_item in split_delete:
                        if k_item == parent_key:
                            break
                        pointer_dict = pointer_dict[k_item]
                    del pointer_dict[parent_key][app_key]
                    # Special handling for deleting all resources in a top-level folder
                    if split_delete_length == 2 and app_key != 'WebAppContainer':
                        pointer_dict[parent_key][app_key] = PyOrderedDict()
                        old_keys = self.past_dict[parent_key][app_key].keys()
                        for old_key in old_keys:
                            pointer_dict[parent_key][app_key][
                                '!' + old_key] = PyOrderedDict()
                    else:
                        pointer_dict[parent_key]['!' +
                                                 app_key] = PyOrderedDict()
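To make the tree-building loop above easier to follow, here is a standalone sketch of the same idea: a delimited change path is unrolled into a nested dictionary whose leaf holds the changed value. It assumes PATH_TOKEN is the pipe character shown in the sample path and substitutes a plain dict for PyOrderedDict:

PATH_TOKEN = '|'   # assumed from the sample change path in the comment above

def build_change_tree(change_path, value):
    # 'resources|JDBCSystemResource|Generic2|...|TestConnectionsOnReserve' becomes
    # {'resources': {'JDBCSystemResource': {'Generic2': {... : value}}}}
    tokens = change_path.split(PATH_TOKEN)
    tree = {}
    folder = tree
    for token in tokens[:-1]:
        folder[token] = {}
        folder = folder[token]
    folder[tokens[-1]] = value
    return tree

change_tree = build_change_tree(
    'resources|JDBCSystemResource|Generic2|JdbcResource|'
    'JDBCConnectionPoolParams|TestConnectionsOnReserve', True)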
Example #9
    def target_server_groups_to_servers(self, server_groups_to_target):
        """
        Target the server groups to the servers.
        :param server_groups_to_target: the list of server groups to target
        :raises: BundleAwareException of the specified type: if an error occurs
        """
        _method_name = '__target_server_groups_to_servers'

        self.logger.entering(server_groups_to_target,
                             class_name=self.__class_name,
                             method_name=_method_name)
        if len(server_groups_to_target) == 0:
            return

        location = LocationContext()
        root_path = self.alias_helper.get_wlst_attributes_path(location)
        self.wlst_helper.cd(root_path)

        # We need to get the effective list of servers for the domain.  Since any servers
        # referenced in the model have already been created but the templates may have
        # defined new servers not listed in the model, get the list from WLST.
        server_names = self._get_existing_server_names()

        # Get the clusters and their members
        cluster_map = self._get_clusters_and_members_map()

        # Get any limits that may have been defined in the model
        domain_info = self.model.get_model_domain_info()
        server_group_targeting_limits = \
            dictionary_utils.get_dictionary_element(domain_info, SERVER_GROUP_TARGETING_LIMITS)
        if len(server_group_targeting_limits) > 0:
            server_group_targeting_limits = \
                self._get_server_group_targeting_limits(server_group_targeting_limits, cluster_map)

        # Get the map of server names to server groups to target
        server_to_server_groups_map =\
            self._get_server_to_server_groups_map(self._admin_server_name,
                                                  server_names,
                                                  server_groups_to_target,
                                                  server_group_targeting_limits)  # type: dict

        if len(server_names) > 1:
            for server, server_groups in server_to_server_groups_map.iteritems(
            ):
                if len(server_groups) > 0:
                    server_name = self.wlst_helper.get_quoted_name_for_wlst(
                        server)
                    self.logger.info('WLSDPLY-12224',
                                     str(server_groups),
                                     server_name,
                                     class_name=self.__class_name,
                                     method_name=_method_name)
                    self.wlst_helper.set_server_groups(server_name,
                                                       server_groups)

        elif len(server_group_targeting_limits) == 0:
            #
            # Domain has no managed servers and there were no targeting limits specified to target
            # server groups to the admin server, so make sure that the server groups are targeted to
            # the admin server.
            #
            # This is really a best effort attempt.  It works for JRF domains but it is certainly possible
            # that it may cause problems with other custom domain types.  Of course, creating a domain with
            # no managed servers is not a primary use case of this tool so do it and hope for the best...
            #
            server_name = self.wlst_helper.get_quoted_name_for_wlst(
                server_names[0])
            self.wlst_helper.set_server_groups(server_name,
                                               server_groups_to_target)

        self.logger.exiting(class_name=self.__class_name,
                            method_name=_method_name)
        return
Example #10
    def continue_when_fail(self):
        response = dictionary_utils.get_dictionary_element(self._stage_dict, TestDefStage.CONTINUE_WHEN_FAIL)
        if response is None:
            response = self._test_def_metadata.get_default_value(TestDefStage.CONTINUE_WHEN_FAIL)
        return response
Example #11
    def _add_resource_groups(self, parent_dict, location):
        groups = dictionary_utils.get_dictionary_element(
            parent_dict, RESOURCE_GROUP)
        self._add_named_elements(RESOURCE_GROUP, groups, location)
        return
Example #12
    def _add_partitions(self, location):
        partitions = dictionary_utils.get_dictionary_element(
            self._resources, PARTITION)
        self._add_named_elements(PARTITION, partitions, location)
        return
Example #13
    def _add_partition_work_managers(self, parent_dict, location):
        managers = dictionary_utils.get_dictionary_element(
            parent_dict, PARTITION_WORK_MANAGER)
        self._add_named_elements(PARTITION_WORK_MANAGER, managers, location)
        return
def get_array_item_info(schema_map):
    return dictionary_utils.get_dictionary_element(schema_map, "items")
Example #15
def _build_template_hash(model, aliases):
    """
    Create a dictionary of substitution values to apply to the templates.
    :param model: Model object used to derive values
    :param aliases: used to derive folder names
    :return: the hash dictionary
    """
    template_hash = dict()

    # domain name and prefix

    domain_name = dictionary_utils.get_element(model.get_model_topology(),
                                               NAME)
    if domain_name is None:
        domain_name = DEFAULT_WLS_DOMAIN_NAME

    template_hash[DOMAIN_NAME] = domain_name

    # should change spaces to hyphens?
    template_hash[DOMAIN_PREFIX] = domain_name.lower()

    # domain UID

    domain_uid = k8s_helper.get_domain_uid(domain_name)
    template_hash[DOMAIN_UID] = domain_uid

    # admin credential

    admin_secret = domain_uid + target_configuration_helper.WEBLOGIC_CREDENTIALS_SECRET_SUFFIX
    template_hash[WEBLOGIC_CREDENTIALS_SECRET] = admin_secret

    # clusters

    clusters = []
    cluster_list = dictionary_utils.get_dictionary_element(
        model.get_model_topology(), CLUSTER)
    for cluster_name in cluster_list:
        cluster_hash = dict()
        cluster_hash[CLUSTER_NAME] = cluster_name

        cluster_values = dictionary_utils.get_dictionary_element(
            cluster_list, cluster_name)
        server_count = k8s_helper.get_server_count(cluster_name,
                                                   cluster_values,
                                                   model.get_model())
        cluster_hash[REPLICAS] = str(server_count)
        clusters.append(cluster_hash)

    template_hash[CLUSTERS] = clusters

    # databases

    databases = []

    location = LocationContext().append_location(JDBC_SYSTEM_RESOURCE)
    name_token = aliases.get_name_token(location)
    location.append_location(JDBC_RESOURCE, JDBC_DRIVER_PARAMS)

    system_resources = dictionary_utils.get_dictionary_element(
        model.get_model_resources(), JDBC_SYSTEM_RESOURCE)
    for jdbc_name in system_resources:
        database_hash = dict()
        database_hash[DATASOURCE_NAME] = jdbc_name

        named = dictionary_utils.get_dictionary_element(
            system_resources, jdbc_name)
        resources = dictionary_utils.get_dictionary_element(
            named, JDBC_RESOURCE)
        driver_params = dictionary_utils.get_dictionary_element(
            resources, JDBC_DRIVER_PARAMS)
        url = dictionary_utils.get_element(driver_params, URL)
        if url is None:
            url = ''
        database_hash[DS_URL] = url

        # should change spaces to hyphens?
        database_hash[DATABASE_PREFIX] = jdbc_name.lower()

        # get the name that matches secret
        location.add_name_token(name_token, jdbc_name)
        secret_name = target_configuration_helper.get_secret_name_for_location(
            location, domain_uid, aliases)
        database_hash[DATABASE_CREDENTIALS] = secret_name

        databases.append(database_hash)

    template_hash[DATABASES] = databases

    return template_hash
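For a small model with one cluster and one data source, the returned hash takes a shape along these lines. The keys are shown as placeholder strings for the constants used above, and every value is made up for illustration:

# Illustrative result only; key spellings and values are not taken from the source.
example_hash = {
    'DOMAIN_NAME': 'base_domain',
    'DOMAIN_PREFIX': 'base_domain',
    'DOMAIN_UID': 'base-domain',
    'WEBLOGIC_CREDENTIALS_SECRET': 'base-domain-weblogic-credentials',
    'CLUSTERS': [{'CLUSTER_NAME': 'cluster-1', 'REPLICAS': '2'}],
    'DATABASES': [{'DATASOURCE_NAME': 'Generic1',
                   'DS_URL': 'jdbc:oracle:thin:@//db-host:1521/ORCLPDB1',
                   'DATABASE_PREFIX': 'generic1',
                   'DATABASE_CREDENTIALS': 'base-domain-jdbc-generic1'}]
}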
Example #16
def generate_k8s_script(model_context, token_dictionary, model_dictionary, exception_type):
    """
    Generate a shell script for creating k8s secrets.
    :param model_context: used to determine output directory
    :param token_dictionary: contains every token
    :param model_dictionary: used to determine domain UID
    :param exception_type: type of exception to throw
    """

    # determine the domain name and UID
    topology = dictionary_utils.get_dictionary_element(model_dictionary, TOPOLOGY)
    domain_name = dictionary_utils.get_element(topology, NAME)
    if domain_name is None:
        domain_name = DEFAULT_WLS_DOMAIN_NAME

    domain_uid = k8s_helper.get_domain_uid(domain_name)
    comment = exception_helper.get_message("WLSDPLY-01665")
    script_hash = {'domainUid': domain_uid, 'topComment': comment}

    # build a map of secret names (jdbc-generic1) to keys (username, password)
    secret_map = {}
    for property_name in token_dictionary:
        halves = property_name.split(':', 1)
        value = token_dictionary[property_name]
        if len(halves) == 2:
            secret_name = halves[0]

            # admin credentials are inserted later, at the top of the list
            if secret_name == WEBLOGIC_CREDENTIALS_SECRET_NAME:
                continue

            secret_key = halves[1]
            if secret_name not in secret_map:
                secret_map[secret_name] = {}
            secret_keys = secret_map[secret_name]
            secret_keys[secret_key] = value

    # update the hash with secrets and paired secrets
    secrets = []
    paired_secrets = [_build_secret_hash(WEBLOGIC_CREDENTIALS_SECRET_NAME, USER_TAG, PASSWORD_TAG)]

    secret_names = secret_map.keys()
    secret_names.sort()
    for secret_name in secret_names:
        secret_keys = secret_map[secret_name]
        user_name = dictionary_utils.get_element(secret_keys, SECRET_USERNAME_KEY)
        if user_name is None:
            secrets.append(_build_secret_hash(secret_name, None, PASSWORD_TAG))
        else:
            paired_secrets.append(_build_secret_hash(secret_name, user_name, PASSWORD_TAG))

    script_hash['secrets'] = secrets
    script_hash['pairedSecrets'] = paired_secrets
    script_hash['longMessage'] = exception_helper.get_message('WLSDPLY-01667', '${LONG_SECRETS_COUNT}')

    long_messages = [
        {'text': exception_helper.get_message('WLSDPLY-01668')},
        {'text': exception_helper.get_message('WLSDPLY-01669')},
        {'text': exception_helper.get_message('WLSDPLY-01670')}
    ]
    script_hash['longMessageDetails'] = long_messages

    file_location = model_context.get_output_dir()
    k8s_file = File(file_location, K8S_SCRIPT_NAME)
    file_template_helper.create_file_from_resource(K8S_SCRIPT_RESOURCE_PATH, script_hash, k8s_file, exception_type)
    FileUtils.chmod(k8s_file.getPath(), 0750)
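The loop above groups token names of the form 'secret-name:key' into a two-level map before the hash is built. A standalone sketch of that grouping with hypothetical token names and placeholder values:

# Hypothetical tokens; real names come from the token dictionary.
token_dictionary = {
    'jdbc-generic1:username': 'scott',
    'jdbc-generic1:password': '<password>',
    'opss-wallet:passphrase': '<passphrase>'
}

secret_map = {}
for property_name in token_dictionary:
    halves = property_name.split(':', 1)
    if len(halves) == 2:
        secret_name, secret_key = halves
        secret_map.setdefault(secret_name, {})[secret_key] = token_dictionary[property_name]

# secret_map == {'jdbc-generic1': {'username': 'scott', 'password': '<password>'},
#                'opss-wallet': {'passphrase': '<passphrase>'}}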
    def _update_server(self, name, dictionary, config_location):
        _method_name = '_update_server'

        # these imports are local, since they are only present in JRF environments.
        # this method is only called after that check has been made.
        from oracle.core.ojdl.weblogic.ODLConfiguration import CONFIG_DIR
        from oracle.core.ojdl.weblogic.ODLConfiguration import CONFIG_FILE
        from oracle.core.ojdl.logging.config import LoggingConfigurationDocument

        config_dir = File(self.model_context.get_domain_home(), CONFIG_DIR)
        server_dir = File(config_dir, name)
        config_file = File(server_dir, CONFIG_FILE)

        try:
            FileUtils.validateWritableFile(config_file.getPath())
            document = LoggingConfigurationDocument(
                FileInputStream(config_file))

            # configure AddJvmNumber
            add_jvm_number = dictionary_utils.get_element(
                dictionary, _ADD_JVM_NUMBER)
            if add_jvm_number is not None:
                document.setAddJvmNumber(
                    alias_utils.convert_boolean(add_jvm_number))

            # configure HandlerDefaults
            handler_defaults = dictionary_utils.get_dictionary_element(
                dictionary, _HANDLER_DEFAULTS)
            if handler_defaults is not None:
                for key in handler_defaults:
                    value = handler_defaults[key]
                    document.setHandlerDefault(key, _get_property_text(value))

            # configure Handlers
            # do these before loggers, in case new handlers are assigned to loggers
            existing_handler_names = document.getHandlerNames()
            handlers = dictionary_utils.get_dictionary_element(
                dictionary, _HANDLER)
            if handlers is not None:
                for handler_name in handlers:
                    handler = handlers[handler_name]
                    self._configure_handler(handler_name, handler, document,
                                            existing_handler_names,
                                            config_location)

            # configure Loggers
            existing_logger_names = document.getLoggerNames()
            loggers = dictionary_utils.get_dictionary_element(
                dictionary, _LOGGER)
            if loggers is not None:
                for logger_name in loggers:
                    logger = loggers[logger_name]
                    self._configure_logger(logger_name, logger, document,
                                           existing_logger_names,
                                           config_location)

            document.writeDocument(FileOutputStream(config_file))

        except (ParserConfigurationException, SAXException, IOException,
                IllegalArgumentException), ex:
            self.logger.severe('WLSDPLY-19707',
                               name,
                               ex.getLocalizedMessage(),
                               class_name=self.__class_name,
                               method_name=_method_name)
Example #18
    def __create_clusters_and_servers(self, location):
        """
        Create the /Cluster, /ServerTemplate, and /Server folder objects.
        :param location: the location to use
        :raises: CreateException: if an error occurs
        """
        _method_name = '__create_clusters_and_servers'

        self.logger.entering(str(location),
                             class_name=self.__class_name,
                             method_name=_method_name)
        #
        # In order for source domain provisioning to work with dynamic clusters, we have to provision
        # the ServerTemplates.  There is a cyclical dependency between ServerTemplates and Clusters, so the
        # ServerTemplates need to exist before the clusters are created.  Once the clusters are provisioned,
        # we can fully populate the ServerTemplates.
        #
        server_template_nodes = dictionary_utils.get_dictionary_element(
            self._topology, SERVER_TEMPLATE)
        if len(server_template_nodes) > 0 and self._is_type_valid(
                location, SERVER_TEMPLATE):
            st_location = LocationContext(location).append_location(
                SERVER_TEMPLATE)
            st_mbean_type = self.alias_helper.get_wlst_mbean_type(st_location)
            st_create_path = self.alias_helper.get_wlst_create_path(
                st_location)
            self.wlst_helper.cd(st_create_path)

            st_token_name = self.alias_helper.get_name_token(st_location)
            for server_template_name in server_template_nodes:
                st_name = self.wlst_helper.get_quoted_name_for_wlst(
                    server_template_name)
                if st_token_name is not None:
                    st_location.add_name_token(st_token_name, st_name)

                st_mbean_name = self.alias_helper.get_wlst_mbean_name(
                    st_location)
                self.logger.info('WLSDPLY-12220', SERVER_TEMPLATE,
                                 st_mbean_name)
                self.wlst_helper.create(st_mbean_name, st_mbean_type)

        cluster_nodes = dictionary_utils.get_dictionary_element(
            self._topology, CLUSTER)
        if len(cluster_nodes) > 0:
            self._create_named_mbeans(CLUSTER,
                                      cluster_nodes,
                                      location,
                                      log_created=True)

        #
        # Now, fully populate the ServerTemplates, if any.
        #
        if len(server_template_nodes) > 0:
            self._create_named_mbeans(SERVER_TEMPLATE,
                                      server_template_nodes,
                                      location,
                                      log_created=True)

        #
        # Finally, create/update the servers.
        #
        server_nodes = dictionary_utils.get_dictionary_element(
            self._topology, SERVER)
        if len(server_nodes) > 0:
            self._create_named_mbeans(SERVER,
                                      server_nodes,
                                      location,
                                      log_created=True)

        self.logger.exiting(class_name=self.__class_name,
                            method_name=_method_name)
        return
Example #19
def _build_template_hash(model, model_context, aliases, credential_injector):
    """
    Create a dictionary of substitution values to apply to the templates.
    :param model: Model object used to derive values
    :param model_context: used to determine domain type
    :param aliases: used to derive folder names
    :param credential_injector: used to identify secrets
    :return: the hash dictionary
    """
    template_hash = dict()

    # actual domain name

    domain_name = dictionary_utils.get_element(model.get_model_topology(),
                                               NAME)
    if domain_name is None:
        domain_name = DEFAULT_WLS_DOMAIN_NAME

    # domain UID, name and prefix must follow DNS-1123

    domain_uid = k8s_helper.get_domain_uid(domain_name)
    template_hash[DOMAIN_UID] = domain_uid
    template_hash[DOMAIN_NAME] = domain_uid
    template_hash[DOMAIN_PREFIX] = domain_uid

    # secrets that should not be included in secrets section
    declared_secrets = []

    # admin credential

    admin_secret = domain_uid + target_configuration_helper.WEBLOGIC_CREDENTIALS_SECRET_SUFFIX
    declared_secrets.append(admin_secret)
    template_hash[WEBLOGIC_CREDENTIALS_SECRET] = admin_secret

    # configuration / model
    template_hash[DOMAIN_TYPE] = model_context.get_domain_type()

    # clusters

    clusters = []
    cluster_list = dictionary_utils.get_dictionary_element(
        model.get_model_topology(), CLUSTER)
    for cluster_name in cluster_list:
        cluster_hash = dict()
        cluster_hash[CLUSTER_NAME] = cluster_name

        cluster_values = dictionary_utils.get_dictionary_element(
            cluster_list, cluster_name)
        server_count = k8s_helper.get_server_count(cluster_name,
                                                   cluster_values,
                                                   model.get_model(), aliases)
        cluster_hash[REPLICAS] = str(server_count)
        clusters.append(cluster_hash)

    template_hash[CLUSTERS] = clusters
    template_hash[HAS_CLUSTERS] = len(clusters) != 0

    # databases

    databases = []

    location = LocationContext().append_location(JDBC_SYSTEM_RESOURCE)
    name_token = aliases.get_name_token(location)
    location.append_location(JDBC_RESOURCE, JDBC_DRIVER_PARAMS)

    system_resources = dictionary_utils.get_dictionary_element(
        model.get_model_resources(), JDBC_SYSTEM_RESOURCE)
    for jdbc_name in system_resources:
        database_hash = dict()
        database_hash[DATASOURCE_NAME] = jdbc_name

        named = dictionary_utils.get_dictionary_element(
            system_resources, jdbc_name)
        resources = dictionary_utils.get_dictionary_element(
            named, JDBC_RESOURCE)
        driver_params = dictionary_utils.get_dictionary_element(
            resources, JDBC_DRIVER_PARAMS)
        url = dictionary_utils.get_element(driver_params, URL)
        if url is None:
            url = ''
        database_hash[DS_URL] = url

        # should change spaces to hyphens?
        database_hash[DATABASE_PREFIX] = jdbc_name.lower()

        # get the name that matches secret
        location.add_name_token(name_token, jdbc_name)
        secret_name = target_configuration_helper.get_secret_name_for_location(
            location, domain_uid, aliases)
        database_hash[DATABASE_CREDENTIALS] = secret_name

        databases.append(database_hash)

    template_hash[DATABASES] = databases
    template_hash[HAS_DATABASES] = len(databases) != 0

    # additional secrets - exclude admin

    additional_secrets = []

    # combine user/password properties to get a single list
    secrets = []
    for property_name in credential_injector.get_variable_cache():
        halves = property_name.split(':', 1)
        name = halves[0]
        if name not in secrets:
            secrets.append(name)

    for secret in secrets:
        secrets_hash = dict()
        qualified_name = domain_uid + "-" + secret
        if qualified_name not in declared_secrets:
            secrets_hash[ADDITIONAL_SECRET_NAME] = qualified_name
            additional_secrets.append(secrets_hash)

    template_hash[ADDITIONAL_SECRETS] = additional_secrets
    template_hash[HAS_ADDITIONAL_SECRETS] = len(additional_secrets) != 0

    return template_hash
Example #20
    def _create_named_mbeans(self,
                             type_name,
                             model_nodes,
                             base_location,
                             log_created=False):
        """
        Create the specified type of MBeans that support multiple instances in the specified location.
        :param type_name: the model folder type
        :param model_nodes: the model dictionary of the specified model folder type
        :param base_location: the base location object to use to create the MBeans
        :param log_created: whether to log creation at the INFO level; by default it is logged at the FINE level
        :raises: CreateException: if an error occurs
        """
        _method_name = '_create_named_mbeans'

        self.logger.entering(type_name,
                             str(base_location),
                             log_created,
                             class_name=self.__class_name,
                             method_name=_method_name)
        if model_nodes is None or len(
                model_nodes) == 0 or not self._is_type_valid(
                    base_location, type_name):
            return

        location = LocationContext(base_location).append_location(type_name)
        self._process_flattened_folder(location)

        token_name = self.alias_helper.get_name_token(location)
        create_path = self.alias_helper.get_wlst_create_path(location)
        list_path = self.alias_helper.get_wlst_list_path(location)
        existing_folder_names = self._get_existing_folders(list_path)
        for model_name in model_nodes:
            name = self.wlst_helper.get_quoted_name_for_wlst(model_name)

            if token_name is not None:
                location.add_name_token(token_name, name)

            wlst_type, wlst_name = self.alias_helper.get_wlst_mbean_type_and_name(
                location)
            if wlst_name not in existing_folder_names:
                if log_created:
                    self.logger.info('WLSDPLY-12100',
                                     type_name,
                                     name,
                                     class_name=self.__class_name,
                                     method_name=_method_name)
                else:
                    self.logger.fine('WLSDPLY-12100',
                                     type_name,
                                     name,
                                     class_name=self.__class_name,
                                     method_name=_method_name)
                self.wlst_helper.create_and_cd(self.alias_helper, wlst_type,
                                               wlst_name, location,
                                               create_path)
            else:
                if log_created:
                    self.logger.info('WLSDPLY-12101',
                                     type_name,
                                     name,
                                     class_name=self.__class_name,
                                     method_name=_method_name)
                else:
                    self.logger.fine('WLSDPLY-12101',
                                     type_name,
                                     name,
                                     class_name=self.__class_name,
                                     method_name=_method_name)

                attribute_path = self.alias_helper.get_wlst_attributes_path(
                    location)
                self.wlst_helper.cd(attribute_path)

            child_nodes = dictionary_utils.get_dictionary_element(
                model_nodes, name)
            self._process_child_nodes(location, child_nodes)

        self.logger.exiting(class_name=self.__class_name,
                            method_name=_method_name)
        return
Example #21
def generate_k8s_script(model_context, token_dictionary, model_dictionary):
    """
    Generate a shell script for creating k8s secrets.
    :param model_context: used to determine output directory
    :param token_dictionary: contains every token
    :param model_dictionary: used to determine domain UID
    """

    # determine the domain name and UID
    topology = dictionary_utils.get_dictionary_element(model_dictionary,
                                                       TOPOLOGY)
    domain_name = dictionary_utils.get_element(topology, NAME)
    if domain_name is None:
        domain_name = DEFAULT_WLS_DOMAIN_NAME

    domain_uid = k8s_helper.get_domain_uid(domain_name)

    nl = '\n'
    file_location = model_context.get_kubernetes_output_dir()
    k8s_file = os.path.join(file_location, "create_k8s_secrets.sh")
    k8s_script = open(k8s_file, 'w')

    k8s_script.write('#!/bin/bash' + nl)

    k8s_script.write(nl)
    k8s_script.write('set -eu' + nl)

    k8s_script.write(nl)
    message = exception_helper.get_message("WLSDPLY-01665", ADMIN_USER_TAG,
                                           ADMIN_PASSWORD_TAG)
    k8s_script.write("# " + message + nl)
    k8s_script.write('NAMESPACE=default' + nl)
    k8s_script.write('DOMAIN_UID=' + domain_uid + nl)

    k8s_script.write(nl)
    k8s_script.write('function create_k8s_secret {' + nl)
    k8s_script.write(
        '  kubectl -n $NAMESPACE delete secret ${DOMAIN_UID}-$1 --ignore-not-found'
        + nl)
    k8s_script.write(
        '  kubectl -n $NAMESPACE create secret generic ${DOMAIN_UID}-$1 --from-literal=password=$2'
        + nl)
    k8s_script.write(
        '  kubectl -n $NAMESPACE label secret ${DOMAIN_UID}-$1 weblogic.domainUID=${DOMAIN_UID}'
        + nl)
    k8s_script.write('}' + nl)

    k8s_script.write(nl)
    k8s_script.write('function create_paired_k8s_secret {' + nl)
    k8s_script.write(
        '  kubectl -n $NAMESPACE delete secret ${DOMAIN_UID}-$1 --ignore-not-found'
        + nl)
    k8s_script.write(
        '  kubectl -n $NAMESPACE create secret generic ${DOMAIN_UID}-$1' +
        ' --from-literal=username=$2 --from-literal=password=$3' + nl)
    k8s_script.write(
        '  kubectl -n $NAMESPACE label secret ${DOMAIN_UID}-$1 weblogic.domainUID=${DOMAIN_UID}'
        + nl)
    k8s_script.write('}' + nl)

    command_string = "create_paired_k8s_secret %s %s %s" \
                     % (WEBLOGIC_CREDENTIALS_SECRET_NAME, ADMIN_USER_TAG, ADMIN_PASSWORD_TAG)

    k8s_script.write(nl)
    message = exception_helper.get_message("WLSDPLY-01664", ADMIN_USER_TAG,
                                           ADMIN_PASSWORD_TAG,
                                           WEBLOGIC_CREDENTIALS_SECRET_NAME)
    k8s_script.write("# " + message + nl)
    k8s_script.write(command_string + nl)

    # build a map of secret names (jdbc-generic1) to keys (username, password)
    secret_map = {}
    for property_name in token_dictionary:
        halves = property_name.split(':', 1)
        value = token_dictionary[property_name]
        if len(halves) == 2:
            secret_name = halves[0]

            # admin credentials are hard-coded in the script, to be first in the list
            if secret_name == WEBLOGIC_CREDENTIALS_SECRET_NAME:
                continue

            secret_key = halves[1]
            if secret_name not in secret_map:
                secret_map[secret_name] = {}
            secret_keys = secret_map[secret_name]
            secret_keys[secret_key] = value

    secret_names = secret_map.keys()
    secret_names.sort()

    for secret_name in secret_names:
        secret_keys = secret_map[secret_name]
        user_name = dictionary_utils.get_element(secret_keys,
                                                 SECRET_USERNAME_KEY)

        if user_name is None:
            message = exception_helper.get_message("WLSDPLY-01663",
                                                   PASSWORD_TAG, secret_name)
            command_string = "create_k8s_secret %s %s " \
                             % (secret_name, PASSWORD_TAG)
        else:
            message = exception_helper.get_message("WLSDPLY-01664", USER_TAG,
                                                   PASSWORD_TAG, secret_name)
            command_string = "create_paired_k8s_secret %s %s %s " \
                             % (secret_name, user_name, PASSWORD_TAG)

        k8s_script.write(nl)
        k8s_script.write("# " + message + nl)
        k8s_script.write(command_string + nl)

    k8s_script.close()
    FileUtils.chmod(k8s_file, 0750)
Example #22
    def _update_resource_dictionary(self, resource_dict):
        """
        Revise the resource file structure with values from defaults, the command line, and elsewhere in the model
        :param resource_dict: the resource file dictionary
        """
        _method_name = '_update_resource_dictionary'

        # add API version if not present
        if API_VERSION not in resource_dict:
            resource_dict[API_VERSION] = DEFAULT_API_VERSION

        # add kind if not present
        if KIND not in resource_dict:
            resource_dict[KIND] = DEFAULT_KIND

        # add a metadata section if not present, since we'll at least add name
        if METADATA not in resource_dict:
            resource_dict[METADATA] = PyOrderedDict()
        metadata_section = resource_dict[METADATA]

        # if metadata name not present, use the domain name from the model, or default
        if K_NAME not in metadata_section:
            domain_name = dictionary_utils.get_element(self._model.get_model_topology(), NAME)
            if domain_name is None:
                domain_name = DEFAULT_WLS_DOMAIN_NAME
            metadata_section[K_NAME] = domain_name

        # add a spec section if not present, since we'll at least add domain home
        if SPEC not in resource_dict:
            resource_dict[SPEC] = PyOrderedDict()
        spec_section = resource_dict[SPEC]

        # only set domain home if it is not present in spec section
        if DOMAIN_HOME not in spec_section:
            spec_section[DOMAIN_HOME] = self._model_context.get_domain_home()

        # only set image if it is not present in spec section
        if IMAGE not in spec_section:
            spec_section[IMAGE] = DEFAULT_IMAGE

        # imagePullSecrets is required unless imagePullPolicy is Never
        pull_secrets_required = True
        if IMAGE_PULL_POLICY in spec_section:
            policy = str(spec_section[IMAGE_PULL_POLICY])
            pull_secrets_required = (policy != NEVER)

        # if imagePullSecrets required and not present, add a list with one FIX ME value
        if pull_secrets_required and (IMAGE_PULL_SECRETS not in spec_section):
            secrets_list = DictionaryList()
            secrets_list.append({'name': DEFAULT_IMAGE_PULL_SECRETS})
            spec_section[IMAGE_PULL_SECRETS] = secrets_list

        # if webLogicCredentialsSecret not present, add it using the FIX ME value
        if WEBLOGIC_CREDENTIALS_SECRET not in spec_section:
            spec_section[WEBLOGIC_CREDENTIALS_SECRET] = DEFAULT_WEBLOGIC_CREDENTIALS_SECRET

        # only update clusters if section is not present in spec section
        if CLUSTERS not in spec_section:
            topology = self._model.get_model_topology()
            model_clusters = dictionary_utils.get_dictionary_element(topology, CLUSTER)
            if len(model_clusters) > 0:
                cluster_list = DictionaryList()
                spec_section[CLUSTERS] = cluster_list
                for cluster_name, cluster_values in model_clusters.items():
                    server_count = k8s_helper.get_server_count(cluster_name, cluster_values, self._model.get_model())
                    cluster_dict = PyOrderedDict()
                    cluster_dict[CLUSTER_NAME] = cluster_name
                    cluster_dict[REPLICAS] = server_count

                    self._logger.info("WLSDPLY-10002", cluster_name, server_count, method_name=_method_name,
                                      class_name=self._class_name)
                    cluster_list.append(cluster_dict)
        return
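Starting from an empty resource dictionary, the updates above produce a structure along the following lines. The lower-case key spellings assume the constants map to the usual WebLogic Kubernetes Operator Domain resource fields; values in angle brackets stand for the DEFAULT_* constants, and the rest are illustrative:

# Illustrative shape only
example_resource = {
    'apiVersion': '<DEFAULT_API_VERSION>',
    'kind': '<DEFAULT_KIND>',
    'metadata': {'name': 'base_domain'},
    'spec': {
        'domainHome': '/u01/domains/base_domain',
        'image': '<DEFAULT_IMAGE>',
        'imagePullSecrets': [{'name': '<DEFAULT_IMAGE_PULL_SECRETS>'}],
        'webLogicCredentialsSecret': '<DEFAULT_WEBLOGIC_CREDENTIALS_SECRET>',
        'clusters': [{'clusterName': 'cluster-1', 'replicas': 2}]
    }
}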
def generate_k8s_script(model_context, token_dictionary, model_dictionary):
    """
    Generate a shell script for creating k8s secrets.
    :param model_context: used to determine output directory
    :param token_dictionary: contains every token
    :param model_dictionary: used to determine domain UID
    """
    target_config = model_context.get_target_configuration()
    if not target_config.requires_secrets_script():
        return

    # determine the domain name and UID
    topology = dictionary_utils.get_dictionary_element(model_dictionary,
                                                       TOPOLOGY)
    domain_name = dictionary_utils.get_element(topology, NAME)
    if domain_name is None:
        domain_name = DEFAULT_WLS_DOMAIN_NAME

    domain_uid = k8s_helper.get_domain_uid(domain_name)

    nl = '\n'
    file_location = model_context.get_kubernetes_output_dir()
    k8s_file = os.path.join(file_location, "create_k8s_secrets.sh")
    k8s_script = open(k8s_file, 'w')

    k8s_script.write('#!/bin/bash' + nl)

    k8s_script.write(nl)
    k8s_script.write('set -eu' + nl)

    k8s_script.write(nl)
    message = exception_helper.get_message("WLSDPLY-01665", ADMIN_USER_TAG,
                                           ADMIN_PASSWORD_TAG)
    k8s_script.write("# " + message + nl)
    k8s_script.write('NAMESPACE=default' + nl)
    k8s_script.write('DOMAIN_UID=' + domain_uid + nl)

    k8s_script.write(nl)
    k8s_script.write('function create_k8s_secret {' + nl)
    k8s_script.write(
        '  kubectl -n $NAMESPACE delete secret ${DOMAIN_UID}-$1 --ignore-not-found'
        + nl)
    k8s_script.write(
        '  kubectl -n $NAMESPACE create secret generic ${DOMAIN_UID}-$1 --from-literal=password=$2'
        + nl)
    k8s_script.write(
        '  kubectl -n $NAMESPACE label secret ${DOMAIN_UID}-$1 weblogic.domainUID=${DOMAIN_UID}'
        + nl)
    k8s_script.write('}' + nl)

    k8s_script.write(nl)
    k8s_script.write('function create_paired_k8s_secret {' + nl)
    k8s_script.write(
        '  kubectl -n $NAMESPACE delete secret ${DOMAIN_UID}-$1 --ignore-not-found'
        + nl)
    k8s_script.write(
        '  kubectl -n $NAMESPACE create secret generic ${DOMAIN_UID}-$1' +
        ' --from-literal=username=$2 --from-literal=password=$3' + nl)
    k8s_script.write(
        '  kubectl -n $NAMESPACE label secret ${DOMAIN_UID}-$1 weblogic.domainUID=${DOMAIN_UID}'
        + nl)
    k8s_script.write('}' + nl)

    command_string = "create_paired_k8s_secret %s %s %s" \
                     % (WEBLOGIC_CREDENTIALS_SECRET_NAME, ADMIN_USER_TAG, ADMIN_PASSWORD_TAG)

    k8s_script.write(nl)
    message = exception_helper.get_message("WLSDPLY-01664", ADMIN_USER_TAG,
                                           ADMIN_PASSWORD_TAG,
                                           WEBLOGIC_CREDENTIALS_SECRET_NAME)
    k8s_script.write("# " + message + nl)
    k8s_script.write(command_string + nl)

    for property_name in token_dictionary:
        # AdminPassword and AdminUserName are created separately above;
        # SecurityConfig.NodeManagerPasswordEncrypted is filtered out here by its short name
        if property_name in [
                'AdminPassword', 'AdminUserName',
                'SecurityConfig.NodeManagerPasswordEncrypted'
        ]:
            continue

        user_name = find_user_name(property_name, model_dictionary)
        secret_names = property_name.lower().split('.')
        secret_name = '-'.join(secret_names[:-1])

        if user_name is None:
            message = exception_helper.get_message("WLSDPLY-01663",
                                                   PASSWORD_TAG, secret_name)
            command_string = "create_k8s_secret %s %s " \
                             % (secret_name, PASSWORD_TAG)
        else:
            message = exception_helper.get_message("WLSDPLY-01664", USER_TAG,
                                                   PASSWORD_TAG, secret_name)
            command_string = "create_paired_k8s_secret %s %s %s " \
                             % (secret_name, user_name, PASSWORD_TAG)

        k8s_script.write(nl)
        k8s_script.write("# " + message + nl)
        k8s_script.write(command_string + nl)

    k8s_script.close()
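
For reference, a minimal standalone sketch (not part of the tool) of how the loop above derives a secret name from a model token; the token name used here is hypothetical:

def derive_secret_name(property_name):
    # mirrors the loop above: lower-case the token, split on '.', drop the last segment
    secret_names = property_name.lower().split('.')
    return '-'.join(secret_names[:-1])

# hypothetical token, for illustration only
print(derive_secret_name('JDBCSystemResource.Generic1.PasswordEncrypted'))  # jdbcsystemresource-generic1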
        if formatter is not None:
            try:
                document.setHandlerFormatter(handler_name, str(formatter))
            except IllegalArgumentException, iae:
                self._log_invalid_set(_FORMATTER, formatter, iae, _method_name,
                                      handler_path)

        encoding = dictionary_utils.get_element(handler, _ENCODING)
        if encoding is not None:
            try:
                document.setHandlerEncoding(handler_name, str(encoding))
            except IllegalArgumentException, iae:
                self._log_invalid_set(_ENCODING, encoding, iae, _method_name,
                                      handler_path)

        properties = dictionary_utils.get_dictionary_element(
            handler, _PROPERTIES)
        for key in properties:
            value = properties[key]
            property_text = _get_property_text(value)
            try:
                document.setHandlerProperty(handler_name, key, property_text)
            except IllegalArgumentException, iae:
                self.logger.severe('WLSDPLY-19706',
                                   key,
                                   property_text,
                                   handler_path,
                                   iae.getLocalizedMessage(),
                                   class_name=self.__class_name,
                                   method_name=_method_name)

    def _configure_logger(self, logger_name, logger, document, existing_names,
    def _update_resource_dictionary(self, resource_dict):
        """
        Revise the resource file structure with values from defaults, the command line, and elsewhere in the model
        :param resource_dict: the resource file dictionary
        """
        _method_name = '_update_resource_dictionary'

        # add a metadata section if not present, since we'll at least add name
        if METADATA not in resource_dict:
            _add_to_top(resource_dict, METADATA, PyOrderedDict())
        metadata_section = resource_dict[METADATA]

        # add kind if not present
        if KIND not in resource_dict:
            _add_to_top(resource_dict, KIND, DEFAULT_KIND)

        # add API version if not present
        if API_VERSION not in resource_dict:
            _add_to_top(resource_dict, API_VERSION, DEFAULT_API_VERSION)

        # if metadata name not present, use the domain name from the model, or default
        if K_NAME not in metadata_section:
            domain_name = dictionary_utils.get_element(self._model.get_model_topology(), NAME)
            if domain_name is None:
                domain_name = DEFAULT_WLS_DOMAIN_NAME
            domain_name = k8s_helper.get_domain_uid(domain_name)
            metadata_section[K_NAME] = domain_name
        domain_uid = metadata_section[K_NAME]

        # add a spec section if not present, since we'll at least add domain home
        if SPEC not in resource_dict:
            resource_dict[SPEC] = PyOrderedDict()
        spec_section = resource_dict[SPEC]

        # only set domain home if it is not present in spec section
        if DOMAIN_HOME not in spec_section:
            spec_section[DOMAIN_HOME] = self._model_context.get_domain_home()

        # only set image if it is not present in spec section
        if IMAGE not in spec_section:
            spec_section[IMAGE] = DEFAULT_IMAGE

        # imagePullSecrets is required unless imagePullPolicy is Never
        pull_secrets_required = True
        if IMAGE_PULL_POLICY in spec_section:
            policy = str(spec_section[IMAGE_PULL_POLICY])
            pull_secrets_required = (policy != NEVER)

        # if imagePullSecrets required and not present, add a list with one FIX ME value
        if pull_secrets_required and (IMAGE_PULL_SECRETS not in spec_section):
            secrets_list = list()
            secrets_list.append({'name': DEFAULT_IMAGE_PULL_SECRETS})
            spec_section[IMAGE_PULL_SECRETS] = secrets_list

        # if webLogicCredentialsSecret not present, add it using the FIX ME value
        if WEBLOGIC_CREDENTIALS_SECRET not in spec_section:
            spec_section[WEBLOGIC_CREDENTIALS_SECRET] = DEFAULT_WEBLOGIC_CREDENTIALS_SECRET

        # only update clusters if section is not present in spec section
        if CLUSTERS not in spec_section:
            topology = self._model.get_model_topology()
            model_clusters = dictionary_utils.get_dictionary_element(topology, CLUSTER)
            if len(model_clusters) > 0:
                cluster_list = list()
                spec_section[CLUSTERS] = cluster_list
                for cluster_name, cluster_values in model_clusters.items():
                    server_count = k8s_helper.get_server_count(cluster_name, cluster_values, self._model.get_model(),
                                                               self._aliases)
                    cluster_dict = PyOrderedDict()
                    cluster_dict[CLUSTER_NAME] = cluster_name
                    cluster_dict[REPLICAS] = server_count

                    self._logger.info("WLSDPLY-10002", cluster_name, server_count, method_name=_method_name,
                                      class_name=self._class_name)
                    cluster_list.append(cluster_dict)

        # create a configuration section in spec if needed
        if CONFIGURATION not in spec_section:
            spec_section[CONFIGURATION] = PyOrderedDict()
        configuration_section = spec_section[CONFIGURATION]

        # create a model section in configuration if needed
        if MODEL not in configuration_section:
            configuration_section[MODEL] = PyOrderedDict()
        model_section = configuration_section[MODEL]

        # set domainType if not specified
        if DOMAIN_TYPE not in model_section:
            model_section[DOMAIN_TYPE] = self._model_context.get_domain_type()

        if SECRETS in configuration_section:
            # if secrets are specified, convert any delimited value to a list so it is written as a hyphenated list
            secrets = alias_utils.convert_to_model_type("list", configuration_section[SECRETS], MODEL_LIST_DELIMITER)
            secrets_list = list()
            secrets_list.extend(secrets)

        else:
            # pull the secrets from the model
            secrets_list = list()
            _add_secrets(self._model.get_model(), secrets_list, domain_uid)

        if secrets_list:
            configuration_section[SECRETS] = secrets_list

        return
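
For orientation, a hedged illustration of the dictionary this method fills in, assuming the constants resolve to the usual WebLogic Kubernetes Operator field names; every value shown is a placeholder, not tool output:

resource_dict = {
    'apiVersion': 'weblogic.oracle/v8',                        # assumed value of DEFAULT_API_VERSION
    'kind': 'Domain',                                          # assumed value of DEFAULT_KIND
    'metadata': {'name': 'sample-domain1'},                    # domain UID derived from the model domain name
    'spec': {
        'domainHome': '/u01/domains/sample_domain',
        'image': '<image-name>',                               # DEFAULT_IMAGE used when absent
        'imagePullSecrets': [{'name': '<pull-secret-name>'}],  # FIX ME entry added when required
        'webLogicCredentialsSecret': '<credentials-secret>',   # FIX ME value added when absent
        'clusters': [{'clusterName': 'cluster-1', 'replicas': 2}],
        'configuration': {
            'model': {'domainType': 'WLS'},
            'secrets': ['sample-domain1-jdbc-generic1']        # built by _add_secrets from the model
        }
    }
}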
Example No. 26
0
    def _create_security_provider_mbeans(self,
                                         type_name,
                                         model_nodes,
                                         base_location,
                                         log_created=False):
        """
        Create the specified security provider MBean types that support multiple instances but use an
        artificial type subfolder in the specified location.
        :param type_name: the model folder type
        :param model_nodes: the model dictionary of the specified model folder type
        :param base_location: the base location object to use to create the MBeans
        :param log_created: whether or not to log created at INFO level, by default it is logged at the FINE level
        :raises: CreateException: if an error occurs
        """
        _method_name = '_create_security_provider_mbeans'

        self.logger.entering(type_name,
                             str(base_location),
                             log_created,
                             class_name=self.__class_name,
                             method_name=_method_name)
        if model_nodes is None or len(
                model_nodes) == 0 or not self._is_type_valid(
                    base_location, type_name):
            return

        location = LocationContext(base_location).append_location(type_name)
        self._process_flattened_folder(location)

        token_name = self.alias_helper.get_name_token(location)
        create_path = self.alias_helper.get_wlst_create_path(location)
        list_path = self.alias_helper.get_wlst_list_path(location)
        existing_folder_names = self._get_existing_folders(list_path)
        for model_name in model_nodes:
            prov_location = LocationContext(location)
            name = self.wlst_helper.get_quoted_name_for_wlst(model_name)
            if token_name is not None:
                prov_location.add_name_token(token_name, name)

            wlst_base_provider_type, wlst_name = self.alias_helper.get_wlst_mbean_type_and_name(
                prov_location)
            model_node = model_nodes[model_name]
            if model_node is not None:
                if len(model_node) == 1:
                    model_type_subfolder_name = list(model_node.keys())[0]
                    prov_location.append_location(model_type_subfolder_name)
                    wlst_type = self.alias_helper.get_wlst_mbean_type(
                        prov_location)
                else:
                    ex = exception_helper.create_create_exception(
                        'WLSDPLY-12117', type_name, model_name,
                        len(model_node))
                    self.logger.throwing(ex,
                                         class_name=self.__class_name,
                                         method_name=_method_name)
                    raise ex
            else:
                # The node is empty so nothing to do...move to the next named node.
                continue

            if wlst_name not in existing_folder_names:
                if log_created:
                    self.logger.info('WLSDPLY-12118',
                                     type_name,
                                     model_type_subfolder_name,
                                     name,
                                     class_name=self.__class_name,
                                     method_name=_method_name)
                else:
                    self.logger.fine('WLSDPLY-12118',
                                     type_name,
                                     model_type_subfolder_name,
                                     name,
                                     class_name=self.__class_name,
                                     method_name=_method_name)
                self.wlst_helper.cd(create_path)
                self.wlst_helper.create(wlst_name, wlst_type,
                                        wlst_base_provider_type)
            else:
                if log_created:
                    self.logger.info('WLSDPLY-12119',
                                     type_name,
                                     model_type_subfolder_name,
                                     name,
                                     class_name=self.__class_name,
                                     method_name=_method_name)
                else:
                    self.logger.fine('WLSDPLY-12119',
                                     type_name,
                                     model_type_subfolder_name,
                                     name,
                                     class_name=self.__class_name,
                                     method_name=_method_name)

            attribute_path = self.alias_helper.get_wlst_attributes_path(
                prov_location)
            self.wlst_helper.cd(attribute_path)

            child_nodes = dictionary_utils.get_dictionary_element(
                model_node, model_type_subfolder_name)
            self.logger.finest(
                'WLSDPLY-12111',
                self.alias_helper.get_model_folder_path(prov_location),
                self.wlst_helper.get_pwd(),
                class_name=self.__class_name,
                method_name=_method_name)
            self._set_attributes(prov_location, child_nodes)
            self._create_subfolders(prov_location, child_nodes)

        self.logger.exiting(class_name=self.__class_name,
                            method_name=_method_name)
        return
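
A hedged sketch of the model fragment this method iterates over: each named provider holds exactly one type subfolder, which is the condition checked inside the loop; the provider names and attribute are illustrative:

model_nodes = {
    'MyAuthenticator': {
        'DefaultAuthenticator': {          # the single type subfolder selects the WLST MBean type
            'ControlFlag': 'SUFFICIENT'    # attributes are applied later by _set_attributes
        }
    },
    'MyIdentityAsserter': {
        'DefaultIdentityAsserter': {}      # an empty subfolder still creates the provider MBean
    }
}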
    def _write_folder(self, folder, name, is_multiple, path, indent):
        """
        Write sample lines for the specified schema folder, recursing into any sub-folders.
        :param folder: the schema folder dictionary
        :param name: the name of the folder, or empty for the top-level folder
        :param is_multiple: whether the folder supports multiple named instances
        :param path: the schema path to the folder, used for messages and recursion
        :param indent: the indentation prefix for output lines
        """
        label = name
        if not label:
            label = KUBERNETES

        if wko_schema_helper.is_unsupported_folder(path):
            self._write_line("\n" + indent + "# " + label +
                             ": (unsupported folder)")
            return

        self._write_line("\n" + indent + label + ":")
        indent = indent + "  "

        properties = folder["properties"]
        if not properties:
            self.fail('No properties in schema path ' + path)

        multi_key = None
        if is_multiple:
            mapped_key = domain_resource_extractor.get_mapped_key(path)
            multi_property = dictionary_utils.get_element(
                properties, mapped_key)
            if multi_property:
                multi_key = mapped_key
                comment = 'maps to ' + multi_key
            else:
                comment = 'unique key for each'

            self._write_line(indent + "'" + name + "-1':  # " + comment)
            indent = indent + "  "

        property_names = list(properties.keys())
        property_names.sort()

        sub_folders = PyOrderedDict()
        multi_sub_folders = []
        for property_name in property_names:
            property_map = properties[property_name]

            property_type = dictionary_utils.get_element(property_map, "type")

            if property_type == "object":
                additional = dictionary_utils.get_dictionary_element(
                    property_map, "additionalProperties")
                additional_type = dictionary_utils.get_element(
                    additional, "type")
                if additional_type:
                    if additional_type not in wko_schema_helper.SIMPLE_TYPES:
                        self.fail('Unknown map type ' + additional_type +
                                  ' for ' + path + ' ' + property_name)
                    nest_indent = indent + "  "
                    self._write_line(indent + property_name + ":")
                    self._write_line(nest_indent + "'key-1': " +
                                     _get_sample_value(additional_type))
                    self._write_line(nest_indent + "'key-2': " +
                                     _get_sample_value(additional_type))
                else:
                    # single object instance
                    sub_folders[property_name] = property_map

            elif property_type == "array":
                array_items = dictionary_utils.get_dictionary_element(
                    property_map, "items")
                array_type = dictionary_utils.get_element(array_items, "type")
                if array_type == "object":
                    # multiple object instances
                    sub_folders[property_name] = array_items
                    multi_sub_folders.append(property_name)
                elif array_type in wko_schema_helper.SIMPLE_TYPES:
                    nest_indent = indent + "  "
                    self._write_line(indent + property_name + ": [")
                    self._write_line(nest_indent +
                                     _get_sample_value(array_type) + ",")
                    self._write_line(nest_indent +
                                     _get_sample_value(array_type))
                    self._write_line(indent + "]")
                else:
                    self.fail('Unknown array type ' + array_type + ' for ' +
                              path + ' ' + property_name)

            elif property_type in wko_schema_helper.SIMPLE_TYPES:
                if property_name != multi_key:
                    value = _get_sample_value(property_type)
                    enum_values = wko_schema_helper.get_enum_values(
                        property_map)
                    if enum_values:
                        value = "'" + enum_values[0] + "'  # " + ', '.join(
                            enum_values)
                    self._write_line(indent + str(property_name) + ": " +
                                     value)

            else:
                self.fail('Unknown property type ' + str(property_type) +
                          ' for ' + str(path) + ' ' + str(property_name))

        # process sub-folders after attributes for clarity
        for property_name in sub_folders:
            subfolder = sub_folders[property_name]
            is_multiple = property_name in multi_sub_folders
            next_path = wko_schema_helper.append_path(path, property_name)
            self._write_folder(subfolder, property_name, is_multiple,
                               next_path, indent)
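
To make the branches above concrete, here is a hedged sketch of a JSON-schema style folder that _write_folder can walk; the property names are illustrative, not taken from the real WKO schema:

folder = {
    'properties': {
        'replicas': {'type': 'number'},                                   # simple type
        'labels': {'type': 'object',
                   'additionalProperties': {'type': 'string'}},           # map of simple values
        'channels': {'type': 'array', 'items': {'type': 'string'}},       # array of simple values
        'serverPod': {'type': 'object',
                      'properties': {'restartVersion': {'type': 'string'}}},        # single nested folder
        'clusters': {'type': 'array',
                     'items': {'type': 'object',
                               'properties': {'clusterName': {'type': 'string'},
                                              'replicas': {'type': 'number'}}}}     # multiple nested instances
    }
}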