def _parse_change_path(self, path):
    """
    Determine the location and attribute name (if specified) for the specified change path.

    Walks the '|'-delimited path one token at a time, growing a LocationContext and
    consulting the aliases to decide whether each token is a folder, an attribute,
    or an instance name (the token following a multiple-instance or security-provider
    folder).

    :param path: delimited change path, such as "resources|JDBCSystemResource|Generic2|JdbcResource"
    :return: tuple - location for path, attribute name from path or None
    :raises: compare exception (WLSDPLY-05712) if a token is neither a known folder nor attribute
    """
    _method_name = '_parse_change_path'
    location = LocationContext()
    attribute_name = None
    # set when the previous token was a folder that requires a named instance,
    # so the NEXT token is consumed as the instance name rather than a folder
    name_token_next = False
    path_tokens = path.split(PATH_TOKEN)
    # path_tokens[0] is the model section (e.g. "resources"); seed the valid
    # folder/attribute name sets from that section
    folder_names = self.aliases.get_model_section_top_level_folder_names(path_tokens[0])
    attribute_names = []
    attributes_location = self.aliases.get_model_section_attribute_location(path_tokens[0])
    if attributes_location:
        attribute_names = self.aliases.get_model_attribute_names(attributes_location)
    # the kubernetes section is not alias-driven; nothing to resolve
    if path_tokens[0] == KUBERNETES:
        return None, None
    for path_token in path_tokens[1:]:
        if name_token_next:
            # consume this token as the instance name for the folder just entered
            token_name = self.aliases.get_name_token(location)
            location.add_name_token(token_name, path_token)
            name_token_next = False
        elif path_token in folder_names:
            # descend into a subfolder and refresh the valid names for the new level
            location.append_location(path_token)
            folder_names = self.aliases.get_model_subfolder_names(location)
            attribute_names = self.aliases.get_model_attribute_names(location)
            regular_type = not self.aliases.is_artificial_type_folder(location)
            security_type = regular_type and self.aliases.is_security_provider_type(location)
            multiple_type = regular_type and self.aliases.supports_multiple_mbean_instances(location)
            if multiple_type or security_type:
                # next path token names a specific instance under this folder
                name_token_next = True
            else:
                # single-instance folder: supply a placeholder name token if one
                # is required but not yet set
                token_name = self.aliases.get_name_token(location)
                if not location.get_name_for_token(token_name):
                    location.add_name_token(token_name, "TOKEN")
        elif path_token in attribute_names:
            # terminal (or trailing) attribute token
            attribute_name = path_token
            name_token_next = False
        else:
            # token is neither a folder nor an attribute at this location
            ex = exception_helper.create_compare_exception('WLSDPLY-05712', path_token, path)
            _logger.throwing(ex, class_name=_class_name, method_name=_method_name)
            raise ex
    return location, attribute_name
class ModelDiffer: def __init__(self, current_dict, past_dict, aliases): self.aliases = aliases self.final_changed_model = PyOrderedDict() self.current_dict = current_dict self.past_dict = past_dict self.set_current = sets.Set() self.set_past = sets.Set() if self.current_dict and len(self.current_dict.keys()) > 0: for item in self.current_dict.keys(): self.set_current.add(item) if self.past_dict and len(self.past_dict.keys()) > 0: for item in self.past_dict.keys(): self.set_past.add(item) self.intersect = self.set_current.intersection(self.set_past) def added(self): return self.set_current - self.intersect def removed(self): return self.set_past - self.intersect def changed(self): result = sets.Set() for o in self.intersect: if self.past_dict[o] != self.current_dict[o]: result.add(o) return result def unchanged(self): result = sets.Set() for o in self.intersect: if self.past_dict[o] == self.current_dict[o]: result.add(o) return result # def print_diff(self,s, category): # print category # if len(s) > 0: # print s def recursive_changed_detail(self, key, token, root): """ Recursively handle the changed items :param key: current key to locate the current dictionary for comparison :param token: token is a '|' separated string of the changed item representing the path of the model as it traverses down the path (this will be changed in recursive calls) :param root: root folder of the changes in the model (never change) """ debug("DEBUG: Entering recursive_changed_detail key=%s token=%s root=%s", key, token, root) a = ModelDiffer(self.current_dict[key], self.past_dict[key], self.aliases) diff = a.changed() added = a.added() removed = a.removed() saved_token = token debug('DEBUG: In recursive changed detail %s', diff) debug('DEBUG: In recursive added detail %s', added) if len(diff) > 0: for o in diff: token = saved_token # The token is a | separated string that is used to parse and rebuilt the structure later debug('DEBUG: in recursive changed detail walking down 1 %s', o) 
token = token + PATH_TOKEN + o if a.is_dict(o): debug('DEBUG: in recursive changed detail walking down 2 %s', token) a.recursive_changed_detail(o, token, root) last = token.rfind(PATH_TOKEN) token = root else: all_changes.append(token) last = token.rfind(PATH_TOKEN) token = root # already out of recursive calls, add all entries from current dictionary # resources.JDBCSubsystemResources.* (note it may not have the lower level nodes added_token = token debug('DEBUG: current added token %s', added_token) if len(added) > 0: for item in added: token = saved_token debug('DEBUG: recursive added token %s item %s ', token, item) all_added.append(token + PATH_TOKEN + item) # We don't really care about this, just put something here is enough if len(removed) > 0: for item in removed: token = saved_token debug('DEBUG: removed %s', item) all_removed.append(token + PATH_TOKEN + item) debug('DEBUG: Exiting recursive_changed_detail') def is_dict(self, key): """ Check to see if the ke in the current dictionary is a dictionary. :param key: key of the dictionary :return: true if it is a dictionary otherwise false """ if self.current_dict.has_key(key) and isinstance(self.current_dict[key], PyOrderedDict): return 1 else: return 0 def calculate_changed_model(self): """ Calculate the changed model. """ _method_name = 'calculate_changed_model' # This is the top level of changes only # e.g. from no appDeployments to have appDeployments # from no resources to have resources # # changed, added, removed are keys in the dictionary # i.e. resources, domainInfo, appDeployments, topology # try: changed = self.changed() added = self.added() removed = self.removed() # # Call recursive for each key (i.e. appDeployments, topology, resources etc..) 
# for s in changed: self.recursive_changed_detail(s, s, s) self._add_results(all_changes) self._add_results(all_added) self._add_results(all_removed, True) for s in added: self.recursive_changed_detail(s, s, s) self._add_results(all_changes) self._add_results(all_added) # Clean up previous delete first for x in all_removed: all_removed.remove(x) # Top level: e.g. delete all resources, all appDeployments for s in removed: self.recursive_changed_detail(s, s, s) self._add_results(all_removed, True) except (KeyError, IndexError), ke: _logger.severe('WLSDPLY-05709', str(ke)), ex = exception_helper.create_pywlst_exception('WLSDPLY-05709', str(ke)) _logger.throwing(ex, class_name=_class_name, method_name=_method_name) raise ex except AliasException, ae: _logger.severe('WLSDPLY-05709', ae.getLocalizedMessage(), error=ae, class_name=_class_name, method_name=_method_name) ex = exception_helper.create_compare_exception(ae.getLocalizedMessage(), error=ae) _logger.throwing(ex, class_name=_class_name, method_name=_method_name) raise ex
def compare(self): """ Do the actual compare of the models. :return: whether the difference is safe for online dynamic update """ _method_name = "compare" # arguments have been verified and same extensions model_file_name = None # validate models first try: if FileUtils.isYamlFile(JFile(os.path.splitext(self.current_dict_file)[1].lower())): model_file_name = self.current_dict_file FileToPython(model_file_name, True).parse() model_file_name = self.past_dict_file FileToPython(model_file_name, True).parse() self.model_context.set_validation_method('lax') aliases = Aliases(model_context=self.model_context, wlst_mode=WlstModes.OFFLINE, exception_type=ExceptionType.COMPARE) validator = Validator(self.model_context, aliases, wlst_mode=WlstModes.OFFLINE) variable_map = validator.load_variables(self.model_context.get_variable_file()) model_file_name = self.current_dict_file model_dictionary = cla_helper.merge_model_files(model_file_name, variable_map) variables.substitute(model_dictionary, variable_map, self.model_context) # Run this utility in stand-alone mode instead of tool mode, # which has stricter checks for the tools. # An archive is not used with the compare models and if the model # references a file in an archive, the compareModel will fail if # running in the stricter tool mode (even with lax). 
# arg_map = dict() arg_map[CommandLineArgUtil.MODEL_FILE_SWITCH] = model_file_name model_context_copy = self.model_context.copy(arg_map) val_copy = Validator(model_context_copy, aliases, wlst_mode=WlstModes.OFFLINE) # any variables should have been substituted at this point validate_variables = {} return_code = val_copy.validate_in_standalone_mode(model_dictionary, validate_variables, archive_file_name=None) if return_code == Validator.ReturnCode.STOP: _logger.severe('WLSDPLY-05705', model_file_name) return VALIDATION_FAIL current_dict = model_dictionary model_file_name = self.past_dict_file model_dictionary = cla_helper.merge_model_files(model_file_name, variable_map) variables.substitute(model_dictionary, variable_map, self.model_context) arg_map[CommandLineArgUtil.MODEL_FILE_SWITCH] = model_file_name model_context_copy = self.model_context.copy(arg_map) val_copy = Validator(model_context_copy, aliases, wlst_mode=WlstModes.OFFLINE) return_code = val_copy.validate_in_standalone_mode(model_dictionary, validate_variables, archive_file_name=None) if return_code == Validator.ReturnCode.STOP: _logger.severe('WLSDPLY-05705', model_file_name) return VALIDATION_FAIL past_dict = model_dictionary except ValidateException, te: _logger.severe('WLSDPLY-20009', _program_name, model_file_name, te.getLocalizedMessage(), error=te, class_name=_class_name, method_name=_method_name) ex = exception_helper.create_compare_exception(te.getLocalizedMessage(), error=te) _logger.throwing(ex, class_name=_class_name, method_name=_method_name) return VALIDATION_FAIL
except ValidateException, te: _logger.severe('WLSDPLY-20009', _program_name, model_file_name, te.getLocalizedMessage(), error=te, class_name=_class_name, method_name=_method_name) ex = exception_helper.create_compare_exception(te.getLocalizedMessage(), error=te) _logger.throwing(ex, class_name=_class_name, method_name=_method_name) return VALIDATION_FAIL except VariableException, ve: _logger.severe('WLSDPLY-20009', _program_name, model_file_name, ve.getLocalizedMessage(), error=ve, class_name=_class_name, method_name=_method_name) ex = exception_helper.create_compare_exception(ve.getLocalizedMessage(), error=ve) _logger.throwing(ex, class_name=_class_name, method_name=_method_name) return VALIDATION_FAIL except TranslateException, pe: _logger.severe('WLSDPLY-20009', _program_name, model_file_name, pe.getLocalizedMessage(), error=pe, class_name=_class_name, method_name=_method_name) ex = exception_helper.create_compare_exception(pe.getLocalizedMessage(), error=pe) _logger.throwing(ex, class_name=_class_name, method_name=_method_name) return VALIDATION_FAIL obj = ModelDiffer(current_dict, past_dict, aliases) obj.calculate_changed_model() net_diff = obj.get_final_changed_model() print BLANK_LINE print format_message('WLSDPLY-05706', self.current_dict_file, self.past_dict_file) print BLANK_LINE if len(net_diff.keys()) == 0: print format_message('WLSDPLY-05710') print BLANK_LINE return 0
def compare(self):
    """
    Do the actual compare of the models.
    :return: whether the difference is safe for online dynamic update
    """
    # NOTE(review): this appears to be an older duplicate of compare() that uses
    # tool-mode validation; the visible span ends after the ValidateException
    # handler, so current_dict/past_dict are assigned but not used here.
    _method_name = "compare"
    # arguments have been verified and same extensions
    model_file_name = None
    # validate models first
    try:
        # parse YAML files up front as a syntax check
        if os.path.splitext(self.current_dict_file)[1].lower() == ".yaml":
            model_file_name = self.current_dict_file
            FileToPython(model_file_name, True).parse()
            model_file_name = self.past_dict_file
            FileToPython(model_file_name, True).parse()

        aliases = Aliases(model_context=self.model_context, wlst_mode=WlstModes.OFFLINE)
        validator = Validator(self.model_context, aliases, wlst_mode=WlstModes.OFFLINE)
        variable_map = validator.load_variables(
            self.model_context.get_variable_file())

        # merge, substitute and validate the current model
        model_file_name = self.current_dict_file
        model_dictionary = cla_helper.merge_model_files(
            model_file_name, variable_map)
        variables.substitute(model_dictionary, variable_map, self.model_context)
        return_code = validator.validate_in_tool_mode(
            model_dictionary, variables_file_name=None, archive_file_name=None)
        if return_code == Validator.ReturnCode.STOP:
            __logger.severe('WLSDPLY-05705', model_file_name)
            return VALIDATION_FAIL
        current_dict = model_dictionary

        # merge, substitute and validate the past model
        model_file_name = self.past_dict_file
        model_dictionary = cla_helper.merge_model_files(
            model_file_name, variable_map)
        variables.substitute(model_dictionary, variable_map, self.model_context)
        return_code = validator.validate_in_tool_mode(
            model_dictionary, variables_file_name=None, archive_file_name=None)
        if return_code == Validator.ReturnCode.STOP:
            __logger.severe('WLSDPLY-05705', model_file_name)
            return VALIDATION_FAIL
        past_dict = model_dictionary
    except ValidateException, te:
        __logger.severe('WLSDPLY-20009', _program_name, model_file_name,
                        te.getLocalizedMessage(), error=te,
                        class_name=_class_name, method_name=_method_name)
        ex = exception_helper.create_compare_exception(
            te.getLocalizedMessage(), error=te)
        __logger.throwing(ex, class_name=_class_name, method_name=_method_name)
        return VALIDATION_FAIL
class ModelFileDiffer:
    """
    This is the main driver for the caller.  It compares two model files whether they are json or yaml format.
    """

    def __init__(self, current_dict, past_dict, model_context, output_dir=None):
        # file names, despite the parameter names suggesting dictionaries
        self.current_dict_file = current_dict
        self.past_dict_file = past_dict
        self.output_dir = output_dir
        self.model_context = model_context

    def get_dictionary(self, file):
        """
        Retrieve the python dictionary from file
        :param file: disk file containing the python dictionary
        :return: python dictionary
        """
        # local aliases so that JSON-style 'true'/'false' literals in the file
        # resolve when the content is evaluated
        true = True
        false = False
        # SECURITY NOTE(review): eval() executes arbitrary code from the file;
        # only safe if the file is trusted tool output.
        # NOTE(review): fh is never closed — file handle leak.
        fh = open(file, 'r')
        content = fh.read()
        return eval(content)

    def compare(self):
        """
        Do the actual compare of the models.
        :return: whether the difference is safe for online dynamic update
        """
        _method_name = "compare"
        # arguments have been verified and same extensions
        model_file_name = None
        # validate models first
        try:
            # parse YAML files up front as a syntax check
            if os.path.splitext(self.current_dict_file)[1].lower() == ".yaml":
                model_file_name = self.current_dict_file
                FileToPython(model_file_name, True).parse()
                model_file_name = self.past_dict_file
                FileToPython(model_file_name, True).parse()

            aliases = Aliases(model_context=self.model_context, wlst_mode=WlstModes.OFFLINE)
            validator = Validator(self.model_context, aliases, wlst_mode=WlstModes.OFFLINE)
            variable_map = validator.load_variables(
                self.model_context.get_variable_file())

            # merge, substitute and validate the current model
            model_file_name = self.current_dict_file
            model_dictionary = cla_helper.merge_model_files(
                model_file_name, variable_map)
            variables.substitute(model_dictionary, variable_map, self.model_context)
            return_code = validator.validate_in_tool_mode(
                model_dictionary, variables_file_name=None, archive_file_name=None)
            if return_code == Validator.ReturnCode.STOP:
                __logger.severe('WLSDPLY-05705', model_file_name)
                return VALIDATION_FAIL
            current_dict = model_dictionary

            # merge, substitute and validate the past model
            model_file_name = self.past_dict_file
            model_dictionary = cla_helper.merge_model_files(
                model_file_name, variable_map)
            variables.substitute(model_dictionary, variable_map, self.model_context)
            return_code = validator.validate_in_tool_mode(
                model_dictionary, variables_file_name=None, archive_file_name=None)
            if return_code == Validator.ReturnCode.STOP:
                __logger.severe('WLSDPLY-05705', model_file_name)
                return VALIDATION_FAIL
            past_dict = model_dictionary
        except ValidateException, te:
            __logger.severe('WLSDPLY-20009', _program_name, model_file_name,
                            te.getLocalizedMessage(), error=te,
                            class_name=_class_name, method_name=_method_name)
            ex = exception_helper.create_compare_exception(
                te.getLocalizedMessage(), error=te)
            __logger.throwing(ex, class_name=_class_name, method_name=_method_name)
            return VALIDATION_FAIL
        except VariableException, ve:
            __logger.severe('WLSDPLY-20009', _program_name, model_file_name,
                            ve.getLocalizedMessage(), error=ve,
                            class_name=_class_name, method_name=_method_name)
            ex = exception_helper.create_compare_exception(
                ve.getLocalizedMessage(), error=ve)
            __logger.throwing(ex, class_name=_class_name, method_name=_method_name)
            return VALIDATION_FAIL
class ModelFileDiffer: """ This is the main driver for the caller. It compares two model files whether they are json or yaml format. """ def __init__(self, current_dict, past_dict, model_context, output_dir=None): self.current_dict_file = current_dict self.past_dict_file = past_dict self.output_dir = output_dir self.model_context = model_context def get_dictionary(self, file): """ Retrieve the python dictionary from file :param file: disk file containing the python dictionary :return: python dictionary """ true = True false = False fh = open(file, 'r') content = fh.read() return eval(content) def compare(self): """ Do the actual compare of the models. :return: whether the difference is safe for online dynamic update """ _method_name = "compare" # arguments have been verified and same extensions model_file_name = None # validate models first try: if FileUtils.isYamlFile(JFile(os.path.splitext(self.current_dict_file)[1].lower())): model_file_name = self.current_dict_file FileToPython(model_file_name, True).parse() model_file_name = self.past_dict_file FileToPython(model_file_name, True).parse() self.model_context.set_validation_method('lax') aliases = Aliases(model_context=self.model_context, wlst_mode=WlstModes.OFFLINE) validator = Validator(self.model_context, aliases, wlst_mode=WlstModes.OFFLINE) variable_map = validator.load_variables(self.model_context.get_variable_file()) model_file_name = self.current_dict_file model_dictionary = cla_helper.merge_model_files(model_file_name, variable_map) variables.substitute(model_dictionary, variable_map, self.model_context) # Run this utility in stand-alone mode instead of tool mode, # which has stricter checks for the tools. # An archive is not used with the compare models and if the model # references a file in an archive, the compareModel will fail if # running in the stricter tool mode (even with lax). 
# arg_map = dict() arg_map[CommandLineArgUtil.MODEL_FILE_SWITCH] = model_file_name model_context_copy = self.model_context.copy(arg_map) val_copy = Validator(model_context_copy, aliases, wlst_mode=WlstModes.OFFLINE) return_code = val_copy.validate_in_standalone_mode(model_dictionary, None, archive_file_name=None) if return_code == Validator.ReturnCode.STOP: _logger.severe('WLSDPLY-05705', model_file_name) return VALIDATION_FAIL current_dict = model_dictionary model_file_name = self.past_dict_file model_dictionary = cla_helper.merge_model_files(model_file_name, variable_map) variables.substitute(model_dictionary, variable_map, self.model_context) arg_map[CommandLineArgUtil.MODEL_FILE_SWITCH] = model_file_name model_context_copy = self.model_context.copy(arg_map) val_copy = Validator(model_context_copy, aliases, wlst_mode=WlstModes.OFFLINE) return_code = val_copy.validate_in_standalone_mode(model_dictionary, None, archive_file_name=None) if return_code == Validator.ReturnCode.STOP: _logger.severe('WLSDPLY-05705', model_file_name) return VALIDATION_FAIL past_dict = model_dictionary except ValidateException, te: _logger.severe('WLSDPLY-20009', _program_name, model_file_name, te.getLocalizedMessage(), error=te, class_name=_class_name, method_name=_method_name) ex = exception_helper.create_compare_exception(te.getLocalizedMessage(), error=te) _logger.throwing(ex, class_name=_class_name, method_name=_method_name) return VALIDATION_FAIL except VariableException, ve: _logger.severe('WLSDPLY-20009', _program_name, model_file_name, ve.getLocalizedMessage(), error=ve, class_name=_class_name, method_name=_method_name) ex = exception_helper.create_compare_exception(ve.getLocalizedMessage(), error=ve) _logger.throwing(ex, class_name=_class_name, method_name=_method_name) return VALIDATION_FAIL
class PrepareModel:
    """
    Prepare model files for a target environment: walk each model, replace
    password-typed attributes/properties with secret tokens or placeholders,
    write the converted models, and generate the target's secret script and
    any additional output.
    (NOTE(review): the original docstring was copied from ModelFileDiffer and
    described model comparison; corrected here.)
    """
    def __init__(self, model_files, model_context, logger, output_dir=None):
        # comma-separated list of model file names
        self.model_files = model_files
        self.output_dir = output_dir
        self.model_context = model_context
        self._aliases = Aliases(model_context=model_context, wlst_mode=WlstModes.OFFLINE,
                                exception_type=ExceptionType.COMPARE)
        self._logger = logger
        # fixed DOMAIN name token used when resolving single-instance folder names
        self._name_tokens_location = LocationContext()
        self._name_tokens_location.add_name_token('DOMAIN', "testdomain")
        # model dictionary currently being walked/updated
        self.current_dict = None
        # maps secret cache keys to values; drives the generated secrets script
        self.cache = OrderedDict()
        self.secrets_to_generate = sets.Set()

    def __walk_model_section(self, model_section_key, model_dict, valid_section_folders):
        """
        Walk one top-level model section (domainInfo/topology/resources),
        dispatching each entry as either a section attribute or a folder.
        """
        _method_name = '__validate_model_section'

        if model_section_key not in model_dict.keys():
            return

        # only specific top-level sections have attributes
        attribute_location = self._aliases.get_model_section_attribute_location(model_section_key)
        valid_attr_infos = []
        path_tokens_attr_keys = []

        if attribute_location is not None:
            valid_attr_infos = self._aliases.get_model_attribute_names_and_types(attribute_location)
            path_tokens_attr_keys = self._aliases.get_model_uses_path_tokens_attribute_names(attribute_location)

        model_section_dict = model_dict[model_section_key]
        for section_dict_key, section_dict_value in model_section_dict.iteritems():
            # section_dict_key is either the name of a folder in the
            # section, or the name of an attribute in the section.
            validation_location = LocationContext()
            model_folder_path = model_section_key + ":/"

            if section_dict_key in valid_attr_infos:
                # section_dict_key is the name of an attribute in the section
                self.__walk_attribute(section_dict_key, section_dict_value, valid_attr_infos,
                                      path_tokens_attr_keys, model_folder_path, attribute_location)
            elif section_dict_key in valid_section_folders:
                # section_dict_key is a folder under the model section

                # Append section_dict_key to location context
                validation_location.append_location(section_dict_key)

                # Call self.__validate_section_folder() passing in
                # section_dict_value as the model_node to process
                self.__walk_section_folder(section_dict_value, validation_location)

    def __walk_section_folder(self, model_node, validation_location):
        """
        Walk a folder node, handling named-instance folders (multiple MBean
        instances and artificial-type subfolders) versus single-instance folders.
        """
        _method_name = '__validate_section_folder'

        model_folder_path = self._aliases.get_model_folder_path(validation_location)

        if self._aliases.supports_multiple_mbean_instances(validation_location):
            # each child key is an instance name
            for name in model_node:
                expanded_name = name
                new_location = LocationContext(validation_location)
                name_token = self._aliases.get_name_token(new_location)
                if name_token is not None:
                    new_location.add_name_token(name_token, expanded_name)
                value_dict = model_node[name]
                self.__walk_model_node(value_dict, new_location)
        elif self._aliases.requires_artificial_type_subfolder_handling(validation_location):
            # same traversal shape as above for artificial-type subfolders
            for name in model_node:
                expanded_name = name
                new_location = LocationContext(validation_location)
                name_token = self._aliases.get_name_token(new_location)
                if name_token is not None:
                    new_location.add_name_token(name_token, expanded_name)
                value_dict = model_node[name]
                self.__walk_model_node(value_dict, new_location)
        else:
            # single-instance folder: resolve (or synthesize) its name token
            name_token = self._aliases.get_name_token(validation_location)
            if name_token is not None:
                name = self._name_tokens_location.get_name_for_token(name_token)
                if name is None:
                    name = '%s-0' % name_token
                validation_location.add_name_token(name_token, name)
            self.__walk_model_node(model_node, validation_location)

    def __walk_model_node(self, model_node, validation_location):
        """
        Walk the keys of a model node, recursing into subfolders and
        dispatching attributes (including 'properties'-typed attributes).
        """
        _method_name = '__process_model_node'

        valid_folder_keys = self._aliases.get_model_subfolder_names(validation_location)
        valid_attr_infos = self._aliases.get_model_attribute_names_and_types(validation_location)
        model_folder_path = self._aliases.get_model_folder_path(validation_location)

        for key, value in model_node.iteritems():
            if key in valid_folder_keys:
                new_location = LocationContext(validation_location).append_location(key)
                if self._aliases.is_artificial_type_folder(new_location):
                    # key is an ARTIFICIAL_TYPE folder
                    valid_attr_infos = self._aliases.get_model_attribute_names_and_types(new_location)
                    self.__walk_attributes(value, valid_attr_infos, new_location)
                else:
                    self.__walk_section_folder(value, new_location)
            elif key in valid_attr_infos:
                # aliases.get_model_attribute_names_and_types(location) filters out
                # attributes that ARE NOT valid in the wlst_version being used, so if
                # we're in this section of code we know key is a bonafide "valid" attribute
                valid_data_type = valid_attr_infos[key]
                if valid_data_type in ['properties']:
                    valid_prop_infos = {}
                    properties = validation_utils.get_properties(value)
                    self.__walk_properties(properties, valid_prop_infos, model_folder_path,
                                           validation_location)
                else:
                    path_tokens_attr_keys = \
                        self._aliases.get_model_uses_path_tokens_attribute_names(validation_location)
                    self.__walk_attribute(key, value, valid_attr_infos, path_tokens_attr_keys,
                                          model_folder_path, validation_location)

    def __walk_attributes(self, attributes_dict, valid_attr_infos, validation_location):
        """Walk every attribute in an artificial-type folder's attribute dict."""
        _method_name = '__validate_attributes'

        path_tokens_attr_keys = self._aliases.get_model_uses_path_tokens_attribute_names(validation_location)
        model_folder_path = self._aliases.get_model_folder_path(validation_location)

        for attribute_name, attribute_value in attributes_dict.iteritems():
            self.__walk_attribute(attribute_name, attribute_value, valid_attr_infos,
                                  path_tokens_attr_keys, model_folder_path, validation_location)

    def __walk_attribute(self, attribute_name, attribute_value, valid_attr_infos,
                         path_tokens_attr_keys, model_folder_path, validation_location):
        """
        Substitute a secret token for the attribute when it is password-typed
        (or is the admin user name).
        """
        _method_name = '__walk_attribute'

        if attribute_name in valid_attr_infos:
            expected_data_type = valid_attr_infos[attribute_name]
            if (expected_data_type == 'password') or (attribute_name == ADMIN_USERNAME):
                self.__substitute_password_with_token(model_folder_path, attribute_name,
                                                      validation_location)

        self._logger.exiting(class_name=_class_name, method_name=_method_name)

    def __walk_properties(self, properties_dict, valid_prop_infos, model_folder_path,
                          validation_location):
        """Walk a 'properties' attribute value, inferring each property's type."""
        _method_name = '__walk_properties'

        for property_name, property_value in properties_dict.iteritems():
            valid_prop_infos[property_name] = validation_utils.get_python_data_type(property_value)
            self.__walk_property(property_name, property_value, valid_prop_infos,
                                 model_folder_path, validation_location)

    def __walk_property(self, property_name, property_value, valid_prop_infos,
                        model_folder_path, validation_location):
        """Substitute a secret token for a password-typed property."""
        _method_name = '__walk_property'

        self._logger.entering(property_name, property_value, str(valid_prop_infos),
                              model_folder_path, class_name=_class_name, method_name=_method_name)

        if property_name in valid_prop_infos:
            expected_data_type = valid_prop_infos[property_name]
            if expected_data_type == 'password':
                self.__substitute_password_with_token(model_folder_path, property_name,
                                                      validation_location)

    def __substitute_password_with_token(self, model_path, attribute_name, validation_location):
        """
        Add the secret for the specified attribute to the cache.
        If the target specifies credentials_method: secrets,
        substitute the secret token into the model.
        :param model_path: text representation of the model path
        :param attribute_name: the name of the attribute or (property)
        :param validation_location: the model location
        """
        model_path_tokens = model_path.split('/')
        tokens_length = len(model_path_tokens)
        variable_name = variable_injector_functions.format_variable_name(validation_location,
                                                                         attribute_name, self._aliases)
        if tokens_length > 1:
            credentials_method = self.model_context.get_target_configuration().get_credentials_method()

            # by default, don't do any assignment to attribute
            model_value = None

            # use attribute name for admin password
            if model_path_tokens[0] == 'domainInfo:' and model_path_tokens[1] == '':
                cache_key = attribute_name
            else:
                cache_key = variable_name

            # for normal secrets, assign the secret name to the attribute
            if credentials_method == SECRETS_METHOD:
                model_value = target_configuration_helper.format_as_secret_token(
                    cache_key, self.model_context.get_target_configuration())
                self.cache[cache_key] = ''

            # for config override secrets, assign a placeholder password to the attribute.
            # config overrides will be used to override the value in the target domain.
            if credentials_method == CONFIG_OVERRIDES_SECRETS_METHOD:
                if attribute_name == ADMIN_USERNAME:
                    model_value = ADMINUSER_PLACEHOLDER
                else:
                    model_value = PASSWORD_PLACEHOLDER
                self.cache[cache_key] = ''

            if model_value is not None:
                # navigate the model dictionary along the path tokens
                # (stripping the trailing ':' from section tokens) and
                # overwrite the attribute value in place
                p_dict = self.current_dict
                for index in range(0, len(model_path_tokens)):
                    token = model_path_tokens[index]
                    if token == '':
                        break
                    if token[-1] == ':':
                        token = token[:-1]
                    p_dict = p_dict[token]
                p_dict[attribute_name] = model_value

    def walk(self):
        """
        Replace password attributes in each model file with secret tokens, and write each model.
        Generate a script to create the required secrets.
        Create any additional output specified for the target environment.
        """
        _method_name = "walk"

        model_file_name = None

        try:
            model_file_list = self.model_files.split(',')
            for model_file in model_file_list:
                self.cache.clear()
                # parse YAML models up front as a syntax check
                if os.path.splitext(model_file)[1].lower() == ".yaml":
                    model_file_name = model_file
                    FileToPython(model_file_name, True).parse()

                aliases = Aliases(model_context=self.model_context, wlst_mode=WlstModes.OFFLINE)
                validator = Validator(self.model_context, aliases, wlst_mode=WlstModes.OFFLINE)

                # Just merge and validate but without substitution
                model_dictionary = cla_helper.merge_model_files(model_file_name, None)
                variable_file = self.model_context.get_variable_file()
                # NOTE(review): if get_variable_file() can return None,
                # os.path.exists(None) raises TypeError — confirm upstream guarantees
                if not os.path.exists(variable_file):
                    variable_file = None

                return_code = validator.validate_in_tool_mode(model_dictionary,
                                                              variables_file_name=variable_file,
                                                              archive_file_name=None)
                if return_code == Validator.ReturnCode.STOP:
                    self._logger.severe('WLSDPLY-05705', model_file_name)
                    return VALIDATION_FAIL

                self.current_dict = model_dictionary
                # walk each top-level section, substituting secret tokens for passwords
                self.__walk_model_section(model.get_model_domain_info_key(), self.current_dict,
                                          aliases.get_model_section_top_level_folder_names(DOMAIN_INFO))
                self.__walk_model_section(model.get_model_topology_key(), self.current_dict,
                                          aliases.get_model_topology_top_level_folder_names())
                self.__walk_model_section(model.get_model_resources_key(), self.current_dict,
                                          aliases.get_model_resources_top_level_folder_names())

                self.current_dict = self._apply_filter_and_inject_variable(
                    self.current_dict, self.model_context, validator)

                # write the converted model to the output directory
                file_name = os.path.join(self.output_dir, os.path.basename(model_file_name))
                fos = JFileOutputStream(file_name, False)
                writer = JPrintWriter(fos, True)
                pty = PythonToYaml(self.current_dict)
                pty._write_dictionary_to_yaml_file(self.current_dict, writer)
                writer.close()

            self.cache.clear()
            for key in self.secrets_to_generate:
                self.cache[key] = ''

            # use a merged, substituted, filtered model to get domain name and create additional target output.
            full_model_dictionary = cla_helper.load_model(_program_name, self.model_context,
                                                          self._aliases, "discover", WlstModes.OFFLINE)

            target_configuration_helper.generate_k8s_script(self.model_context, self.cache,
                                                            full_model_dictionary)

            # create any additional outputs from full model dictionary
            target_configuration_helper.create_additional_output(Model(full_model_dictionary),
                                                                 self.model_context, self._aliases,
                                                                 ExceptionType.VALIDATE)
        except ValidateException, te:
            self._logger.severe('WLSDPLY-20009', _program_name, model_file_name,
                                te.getLocalizedMessage(), error=te,
                                class_name=_class_name, method_name=_method_name)
            ex = exception_helper.create_compare_exception(te.getLocalizedMessage(), error=te)
            self._logger.throwing(ex, class_name=_class_name, method_name=_method_name)
            return VALIDATION_FAIL
        except VariableException, ve:
            self._logger.severe('WLSDPLY-20009', _program_name, model_file_name,
                                ve.getLocalizedMessage(), error=ve,
                                class_name=_class_name, method_name=_method_name)
            ex = exception_helper.create_compare_exception(ve.getLocalizedMessage(), error=ve)
            self._logger.throwing(ex, class_name=_class_name, method_name=_method_name)
            return VALIDATION_FAIL
def walk(self): """ Replace password attributes in each model file with secret tokens, and write each model. Generate a script to create the required secrets. Create any additional output specified for the target environment. """ _method_name = "walk" model_file_name = None try: model_file_list = self.model_files.split(',') for model_file in model_file_list: self.cache.clear() if os.path.splitext(model_file)[1].lower() == ".yaml": model_file_name = model_file FileToPython(model_file_name, True).parse() aliases = Aliases(model_context=self.model_context, wlst_mode=WlstModes.OFFLINE) validator = Validator(self.model_context, aliases, wlst_mode=WlstModes.OFFLINE) # Just merge and validate but without substitution model_dictionary = cla_helper.merge_model_files( model_file_name, None) variable_file = self.model_context.get_variable_file() if not os.path.exists(variable_file): variable_file = None return_code = validator.validate_in_tool_mode( model_dictionary, variables_file_name=variable_file, archive_file_name=None) if return_code == Validator.ReturnCode.STOP: self._logger.severe('WLSDPLY-05705', model_file_name) return VALIDATION_FAIL self.current_dict = model_dictionary self.__walk_model_section( model.get_model_domain_info_key(), self.current_dict, aliases.get_model_section_top_level_folder_names( DOMAIN_INFO)) self.__walk_model_section( model.get_model_topology_key(), self.current_dict, aliases.get_model_topology_top_level_folder_names()) self.__walk_model_section( model.get_model_resources_key(), self.current_dict, aliases.get_model_resources_top_level_folder_names()) self.current_dict = self._apply_filter_and_inject_variable( self.current_dict, self.model_context, validator) file_name = os.path.join(self.output_dir, os.path.basename(model_file_name)) fos = JFileOutputStream(file_name, False) writer = JPrintWriter(fos, True) pty = PythonToYaml(self.current_dict) pty._write_dictionary_to_yaml_file(self.current_dict, writer) writer.close() self.cache.clear() for 
key in self.secrets_to_generate: self.cache[key] = '' # use a merged, substituted, filtered model to get domain name and create additional target output. full_model_dictionary = cla_helper.load_model( _program_name, self.model_context, self._aliases, "discover", WlstModes.OFFLINE) target_configuration_helper.generate_k8s_script( self.model_context, self.cache, full_model_dictionary) # create any additional outputs from full model dictionary target_configuration_helper.create_additional_output( Model(full_model_dictionary), self.model_context, self._aliases, ExceptionType.VALIDATE) except ValidateException, te: self._logger.severe('WLSDPLY-20009', _program_name, model_file_name, te.getLocalizedMessage(), error=te, class_name=_class_name, method_name=_method_name) ex = exception_helper.create_compare_exception( te.getLocalizedMessage(), error=te) self._logger.throwing(ex, class_name=_class_name, method_name=_method_name) return VALIDATION_FAIL
def walk(self): _method_name = "walk" model_file_name = None try: model_file_list = self.model_files.split(',') for model_file in model_file_list: self.cache.clear() if os.path.splitext(model_file)[1].lower() == ".yaml": model_file_name = model_file FileToPython(model_file_name, True).parse() aliases = Aliases(model_context=self.model_context, wlst_mode=WlstModes.OFFLINE) validator = Validator(self.model_context, aliases, wlst_mode=WlstModes.OFFLINE) # Just merge and validate but without substitution model_dictionary = cla_helper.merge_model_files(model_file_name, None) variable_file = self.model_context.get_variable_file() if not os.path.exists(variable_file): variable_file=None return_code = validator.validate_in_tool_mode(model_dictionary, variables_file_name=variable_file, archive_file_name=None) if return_code == Validator.ReturnCode.STOP: __logger.severe('WLSDPLY-05705', model_file_name) return VALIDATION_FAIL self.current_dict = model_dictionary self.__walk_model_section(model.get_model_domain_info_key(), self.current_dict, aliases.get_model_section_top_level_folder_names(DOMAIN_INFO)) self.__walk_model_section(model.get_model_topology_key(), self.current_dict, aliases.get_model_topology_top_level_folder_names()) self.__walk_model_section(model.get_model_resources_key(), self.current_dict, aliases.get_model_resources_top_level_folder_names()) self.current_dict = self._apply_filter_and_inject_variable(self.current_dict, self.model_context, validator) file_name = os.path.join(self.output_dir, os.path.basename(model_file_name)) fos = JFileOutputStream(file_name, False) writer = JPrintWriter(fos, True) pty = PythonToYaml(self.current_dict) pty._write_dictionary_to_yaml_file(self.current_dict, writer) writer.close() self.cache.clear() for key in self.secrets_to_generate: self.cache[key] = '' target_configuration_helper.generate_k8s_script(self.model_context.get_kubernetes_variable_file(), self.cache) except ValidateException, te: __logger.severe('WLSDPLY-20009', 
_program_name, model_file_name, te.getLocalizedMessage(), error=te, class_name=_class_name, method_name=_method_name) ex = exception_helper.create_compare_exception(te.getLocalizedMessage(), error=te) __logger.throwing(ex, class_name=_class_name, method_name=_method_name) return VALIDATION_FAIL
class PrepareModel:
    """
    This is the main driver for the caller. It compares two model files whether they are json or yaml format.
    """
    def __init__(self, model_files, model_context, logger, output_dir=None):
        # :param model_files: comma-separated list of model file paths to process
        # :param model_context: tool context holding command-line and target settings
        # :param logger: logger used for all diagnostics from this instance
        # :param output_dir: directory where tokenized models are written (may be None)
        self.model_files = model_files
        self.output_dir = output_dir
        self.model_context = model_context
        self._aliases = Aliases(model_context=model_context, wlst_mode=WlstModes.OFFLINE,
                                exception_type=ExceptionType.COMPARE)
        self._logger = logger
        # location used only to resolve name tokens; seeded with a placeholder domain name
        self._name_tokens_location = LocationContext()
        self._name_tokens_location.add_name_token('DOMAIN', "testdomain")
        self.current_dict = None
        # injector that replaces credential attribute values with secret tokens
        self.credential_injector = CredentialInjector(_program_name, None, model_context)

    def __walk_model_section(self, model_section_key, model_dict, valid_section_folders):
        """
        Walk one top-level model section, visiting its attributes and folders so that
        credential attributes can be tokenized.

        :param model_section_key: section name, e.g. topology or resources
        :param model_dict: full model dictionary containing the section
        :param valid_section_folders: folder names that are valid directly under this section
        """
        _method_name = '__walk_model_section'

        if model_section_key not in model_dict.keys():
            return

        # only specific top-level sections have attributes
        attribute_location = self._aliases.get_model_section_attribute_location(model_section_key)

        valid_attr_infos = []
        if attribute_location is not None:
            valid_attr_infos = self._aliases.get_model_attribute_names_and_types(attribute_location)

        model_section_dict = model_dict[model_section_key]
        for section_dict_key, section_dict_value in model_section_dict.iteritems():
            # section_dict_key is either the name of a folder in the
            # section, or the name of an attribute in the section.
            model_location = LocationContext()

            if section_dict_key in valid_attr_infos:
                # section_dict_key is the name of an attribute in the section
                self.__walk_attribute(model_section_dict, section_dict_key, attribute_location)
            elif section_dict_key in valid_section_folders:
                # section_dict_key is a folder under the model section

                # Append section_dict_key to location context
                model_location.append_location(section_dict_key)

                # Call self.__validate_section_folder() passing in section_dict_value as the model_node to process
                self.__walk_section_folder(section_dict_value, model_location)
            # NOTE(review): keys that are neither a valid attribute nor a valid folder are
            # silently skipped here -- presumably validation has already reported them; confirm.

    def __walk_section_folder(self, model_node, validation_location):
        """
        Walk a folder node, handling named (multi-instance) folders, artificial security
        provider folders, and plain single-instance folders.

        :param model_node: dictionary for the folder contents
        :param validation_location: alias location context for this folder
        """
        _method_name = '__walk_section_folder'

        if self._aliases.supports_multiple_mbean_instances(validation_location):
            # each key of model_node is an instance name; bind it as the name token
            for name in model_node:
                expanded_name = name
                new_location = LocationContext(validation_location)
                name_token = self._aliases.get_name_token(new_location)
                if name_token is not None:
                    new_location.add_name_token(name_token, expanded_name)
                value_dict = model_node[name]
                self.__walk_model_node(value_dict, new_location)
        elif self._aliases.requires_artificial_type_subfolder_handling(validation_location):
            # same per-name handling for artificial type subfolders (e.g. security providers)
            for name in model_node:
                expanded_name = name
                new_location = LocationContext(validation_location)
                name_token = self._aliases.get_name_token(new_location)
                if name_token is not None:
                    new_location.add_name_token(name_token, expanded_name)
                value_dict = model_node[name]
                self.__walk_model_node(value_dict, new_location)
        else:
            # single-instance folder: resolve the name token from the seeded tokens
            # location, falling back to a synthesized '<token>-0' placeholder name
            name_token = self._aliases.get_name_token(validation_location)
            if name_token is not None:
                name = self._name_tokens_location.get_name_for_token(name_token)
                if name is None:
                    name = '%s-0' % name_token
                validation_location.add_name_token(name_token, name)
            self.__walk_model_node(model_node, validation_location)

    def __walk_model_node(self, model_node, validation_location):
        """
        Walk the keys of a folder dictionary, recursing into subfolders and visiting
        any valid attributes.

        :param model_node: dictionary for the current folder
        :param validation_location: alias location context for the current folder
        """
        _method_name = '__walk_model_node'

        valid_folder_keys = self._aliases.get_model_subfolder_names(validation_location)
        valid_attr_infos = self._aliases.get_model_attribute_names_and_types(validation_location)

        for key, value in model_node.iteritems():
            if key in valid_folder_keys:
                new_location = LocationContext(validation_location).append_location(key)

                if self._aliases.is_artificial_type_folder(new_location):
                    # key is an ARTIFICIAL_TYPE folder
                    # NOTE(review): this reassigns valid_attr_infos for the remainder of the
                    # loop, so attributes of the CURRENT folder checked after this point are
                    # matched against the artificial folder's attributes -- confirm intended.
                    valid_attr_infos = self._aliases.get_model_attribute_names_and_types(new_location)
                    self.__walk_attributes(value, valid_attr_infos, new_location)
                else:
                    self.__walk_section_folder(value, new_location)

            elif key in valid_attr_infos:
                # aliases.get_model_attribute_names_and_types(location) filters out
                # attributes that ARE NOT valid in the wlst_version being used, so if
                # we're in this section of code we know key is a bonafide "valid" attribute
                self.__walk_attribute(model_node, key, validation_location)

    def __walk_attributes(self, attributes_dict, valid_attr_infos, validation_location):
        """
        Visit every valid attribute in attributes_dict.

        :param attributes_dict: dictionary of attribute name to value
        :param valid_attr_infos: valid attribute names (and types) at this location
        :param validation_location: alias location context for these attributes
        """
        _method_name = '__walk_attributes'

        for attribute_name, attribute_value in attributes_dict.iteritems():
            if attribute_name in valid_attr_infos:
                self.__walk_attribute(attributes_dict, attribute_name, validation_location)

    def __walk_attribute(self, model_dict, attribute_name, attribute_location):
        """
        Visit a single attribute: delegate to the credential injector, which replaces the
        value in model_dict with a secret token if it is a credential attribute.

        :param model_dict: dictionary containing the attribute (mutated in place)
        :param attribute_name: name of the attribute to check
        :param attribute_location: alias location context for the attribute
        """
        _method_name = '__walk_attribute'

        self.credential_injector.check_and_tokenize(model_dict, attribute_name, attribute_location)

        self._logger.exiting(class_name=_class_name, method_name=_method_name)

    def walk(self):
        """
        Replace password attributes in each model file with secret tokens, and write each model.
        Generate a script to create the required secrets.
        Create any additional output specified for the target environment.
        """
        _method_name = "walk"

        model_file_name = None

        # create a merged model that is not substituted
        merged_model_dictionary = {}
        try:
            model_file_list = self.model_files.split(',')
            for model_file in model_file_list:
                if os.path.splitext(model_file)[1].lower() == ".yaml":
                    model_file_name = model_file
                    FileToPython(model_file_name, True).parse()
                aliases = Aliases(model_context=self.model_context, wlst_mode=WlstModes.OFFLINE)
                validator = Validator(self.model_context, aliases, wlst_mode=WlstModes.OFFLINE)

                # Just merge and validate but without substitution
                model_dictionary = cla_helper.merge_model_files(model_file_name, None)

                variable_file = self.model_context.get_variable_file()
                # NOTE(review): if get_variable_file() returns None, os.path.exists(None)
                # raises TypeError rather than returning False -- confirm callers always
                # supply a variable file path.
                if not os.path.exists(variable_file):
                    variable_file = None

                return_code = validator.validate_in_tool_mode(model_dictionary, variables_file_name=variable_file,
                                                              archive_file_name=None)
                if return_code == Validator.ReturnCode.STOP:
                    self._logger.severe('WLSDPLY-05705', model_file_name)
                    return VALIDATION_FAIL

                self.current_dict = model_dictionary

                # walk each known model section so credential attributes get tokenized
                self.__walk_model_section(model.get_model_domain_info_key(), self.current_dict,
                                          aliases.get_model_section_top_level_folder_names(DOMAIN_INFO))
                self.__walk_model_section(model.get_model_topology_key(), self.current_dict,
                                          aliases.get_model_topology_top_level_folder_names())
                self.__walk_model_section(model.get_model_resources_key(), self.current_dict,
                                          aliases.get_model_resources_top_level_folder_names())

                self.current_dict = self._apply_filter_and_inject_variable(self.current_dict, self.model_context)

                # write the tokenized model to the output directory, keeping the original file name
                file_name = os.path.join(self.output_dir, os.path.basename(model_file_name))
                fos = JFileOutputStream(file_name, False)
                writer = JPrintWriter(fos, True)
                pty = PythonToYaml(self.current_dict)
                pty._write_dictionary_to_yaml_file(self.current_dict, writer)
                writer.close()

                # accumulate all processed models into one merged dictionary
                cla_helper.merge_model_dictionaries(merged_model_dictionary, self.current_dict, None)

            # filter variables or secrets that are no longer in the merged, filtered model
            filter_helper.apply_filters(merged_model_dictionary, "discover", self.model_context)
            self.credential_injector.filter_unused_credentials(merged_model_dictionary)

            # use a merged, substituted, filtered model to get domain name and create additional target output.
            full_model_dictionary = cla_helper.load_model(_program_name, self.model_context, self._aliases,
                                                          "discover", WlstModes.OFFLINE)

            target_config = self.model_context.get_target_configuration()
            if target_config.uses_credential_secrets():
                target_configuration_helper.generate_k8s_script(self.model_context,
                                                                self.credential_injector.get_variable_cache(),
                                                                full_model_dictionary, ExceptionType.VALIDATE)

            # create any additional outputs from full model dictionary
            target_configuration_helper.create_additional_output(Model(full_model_dictionary), self.model_context,
                                                                 self._aliases, self.credential_injector,
                                                                 ExceptionType.VALIDATE)
        except ValidateException, te:
            self._logger.severe('WLSDPLY-20009', _program_name, model_file_name, te.getLocalizedMessage(),
                                error=te, class_name=_class_name, method_name=_method_name)
            ex = exception_helper.create_compare_exception(te.getLocalizedMessage(), error=te)
            self._logger.throwing(ex, class_name=_class_name, method_name=_method_name)
            return VALIDATION_FAIL
        except VariableException, ve:
            self._logger.severe('WLSDPLY-20009', _program_name, model_file_name, ve.getLocalizedMessage(),
                                error=ve, class_name=_class_name, method_name=_method_name)
            ex = exception_helper.create_compare_exception(ve.getLocalizedMessage(), error=ve)
            self._logger.throwing(ex, class_name=_class_name, method_name=_method_name)
            return VALIDATION_FAIL
class ModelFileDiffer:
    """
    This is the main driver for the caller. It compares two model files whether they are json or yaml format.
    """
    def __init__(self, current_dict, past_dict, model_context, output_dir=None):
        # :param current_dict: path of the newer model file
        # :param past_dict: path of the older model file
        # :param model_context: tool context holding command-line settings
        # :param output_dir: optional directory for generated output
        self.current_dict_file = current_dict
        self.past_dict_file = past_dict
        self.output_dir = output_dir
        self.model_context = model_context
        # For recursive calls
        self.all_changes = []
        self.all_added = []
        self.all_removed = []
        self.compare_msgs = sets.Set()

    def get_dictionary(self, file):
        """
        Retrieve the python dictionary from file
        :param file: disk file containing the python dictionary
        :return: python dictionary
        """
        # NOTE: 'file' shadows the builtin; kept to preserve the public parameter name.
        # local aliases so JSON-style 'true'/'false' literals in the file evaluate
        # (presumably the file may contain a JSON-ish repr -- confirm with callers)
        true = True
        false = False
        # NOTE(review): eval() of file content executes arbitrary code -- only safe for
        # trusted, tool-generated files; also fh is never closed (relies on GC).
        fh = open(file, 'r')
        content = fh.read()
        return eval(content)

    def compare(self):
        """
        Do the actual compare of the models.
        :return: whether the difference is safe for online dynamic update
        """
        _method_name = "compare"
        # arguments have been verified and same extensions
        model_file_name = None

        # validate models first
        try:
            # NOTE(review): isYamlFile is given only the lowercased extension wrapped in a
            # JFile, not the full path -- presumably the extension alone is sufficient for
            # this check; confirm against FileUtils.isYamlFile.
            if FileUtils.isYamlFile(JFile(os.path.splitext(self.current_dict_file)[1].lower())):
                # parse both files up front so syntax errors surface before any merging
                model_file_name = self.current_dict_file
                FileToPython(model_file_name, True).parse()
                model_file_name = self.past_dict_file
                FileToPython(model_file_name, True).parse()

            self.model_context.set_validation_method('lax')

            aliases = Aliases(model_context=self.model_context, wlst_mode=WlstModes.OFFLINE,
                              exception_type=ExceptionType.COMPARE)

            validator = Validator(self.model_context, aliases, wlst_mode=WlstModes.OFFLINE)

            variable_map = validator.load_variables(self.model_context.get_variable_file())

            # merge, substitute and validate the CURRENT model
            model_file_name = self.current_dict_file
            model_dictionary = cla_helper.merge_model_files(model_file_name, variable_map)

            variables.substitute(model_dictionary, variable_map, self.model_context)

            # validate against a context copy pointing at this specific model file
            arg_map = dict()
            arg_map[CommandLineArgUtil.MODEL_FILE_SWITCH] = model_file_name
            model_context_copy = self.model_context.copy(arg_map)
            val_copy = Validator(model_context_copy, aliases, wlst_mode=WlstModes.OFFLINE)

            # any variables should have been substituted at this point
            return_code = val_copy.validate_in_tool_mode(model_dictionary, variables_file_name=None,
                                                         archive_file_name=None)
            if return_code == Validator.ReturnCode.STOP:
                _logger.severe('WLSDPLY-05705', model_file_name)
                return VALIDATION_FAIL

            current_dict = model_dictionary

            # merge, substitute and validate the PAST model the same way
            model_file_name = self.past_dict_file
            model_dictionary = cla_helper.merge_model_files(model_file_name, variable_map)
            variables.substitute(model_dictionary, variable_map, self.model_context)

            arg_map[CommandLineArgUtil.MODEL_FILE_SWITCH] = model_file_name
            model_context_copy = self.model_context.copy(arg_map)
            val_copy = Validator(model_context_copy, aliases, wlst_mode=WlstModes.OFFLINE)

            return_code = val_copy.validate_in_tool_mode(model_dictionary, variables_file_name=None,
                                                         archive_file_name=None)
            if return_code == Validator.ReturnCode.STOP:
                _logger.severe('WLSDPLY-05705', model_file_name)
                return VALIDATION_FAIL

            past_dict = model_dictionary
        except ValidateException, te:
            _logger.severe('WLSDPLY-20009', _program_name, model_file_name, te.getLocalizedMessage(),
                           error=te, class_name=_class_name, method_name=_method_name)
            ex = exception_helper.create_compare_exception(te.getLocalizedMessage(), error=te)
            _logger.throwing(ex, class_name=_class_name, method_name=_method_name)
            return VALIDATION_FAIL
        except VariableException, ve:
            _logger.severe('WLSDPLY-20009', _program_name, model_file_name, ve.getLocalizedMessage(),
                           error=ve, class_name=_class_name, method_name=_method_name)
            ex = exception_helper.create_compare_exception(ve.getLocalizedMessage(), error=ve)
            _logger.throwing(ex, class_name=_class_name, method_name=_method_name)
            return VALIDATION_FAIL