def xml_check_children(xml_doc_a, xml_doc_b, xpath):
    '''Create a shallow copy of b's children (and remove nodes
       which are seen as equal).'''
    bcn = copy.copy(xml_doc_b.childNodes)
    # Iterate through the child nodes of 'a' ...
    for a_children in xml_doc_a.childNodes:
        tracer.debug(
            LogFormatter.format(
                97, "xmlcmp: comparing child node [%s]" % a_children))
        # ... check if there is the same one in 'b'
        found_ac = False
        for b_children in bcn:
            result, err_msg = xmlequals(a_children, b_children,
                                        xpath + "/" + xml_doc_a.tagName)
            if result:
                # a_children and b_children are equal: remove b_children
                # from bcn and skip to the next a_children
                bcn.remove(b_children)
                found_ac = True
                tracer.debug(LogFormatter.format(
                    98, "[%s] xmlcmp: found equal subtrees [%s]"
                    % (xpath, a_children)))
                tracer.debug(LogFormatter.format(
                    99, "[%s] xmlcmp: remaining elements [%s]"
                    % (xpath, bcn)))
                break
        if not found_ac:
            return False, "Child node [%s] not found at [%s] - " \
                "last error was [%s]" % (a_children, xpath, err_msg)
    assert (len(bcn) == 0)
    return True, None
def handle_modules_tag(self, reqs):
    if self.mods == None:
        return
    for modkey, module in self.mods.get_tagtype(self.tbhtags).items():
        try:
            tracer.debug("handle modules tag modkey [%s] tagtype [%s]"
                         % (modkey, self.tbhtags))
            if self.tbhtags not in module.get_type_set():
                logger.error(
                    LogFormatter.format(
                        90, "Wrong module type [%s] not in [%s]"
                        % (self.tbhtags, module.get_type_set())
                    )
                )
                continue
            key, value = module.rewrite(self.id, reqs)
            # Check whether the current key is already present
            # in the map.
            if key in self.values:
                logger.error(LogFormatter.format(54,
                             "tag [%s] already defined" % (key),
                             self.id))
                self._set_not_usable()
                # Also continue to get possible further error
                # messages.
            self.values[key] = value
        except RMTException, rmte:
            # Some semantic error occurred: do not interpret key or
            # value.
            logger.error(LogFormatter.rmte(rmte))
            logger.error(LogFormatter.format(41,
                         "semantic error occurred in "
                         "module [%s]" % modkey, self.id))
            self._set_not_usable()
def _handle_modules(self, input_mods):
    '''Handle all modules which are executed on the requirement set level.
       (One '_' only because this is used by the unit tests.)'''
    tracer.debug("Called.")
    # Dependencies can only be resolved once all requirements were
    # successfully read in.
    self.__handle_modules_reqdeps(input_mods)
    # If there was an error, the state flag is set:
    tracer.debug("Check usability.")
    if not self.is_usable():
        logger.error(LogFormatter.format(
            43, "there was a problem handling the "
            "requirement set modules"))
        return False
    # There must be no unhandled tags left.
    tracer.debug("Check all handled.")
    if not self.__all_tags_handled():
        logger.error(LogFormatter.format(
            56, "There were errors encountered during parsing "
            "and checking - can't continue."))
        return False
    return True
def __resolve_solved_by_one_req_deps(self, req):
    content = req.brmo["Solved by"].get_content()
    # If available, it must not be empty.
    if not content:
        logger.error(LogFormatter.format(
            77, "'Solved by' field has length 0", req.get_id()))
        return False
    # Step through the list.
    dep_list = content.split()
    tracer.debug("dependent list [%s]", dep_list)
    for dep in dep_list:
        if dep not in self.__requirements:
            logger.error(LogFormatter.format(
                74, "'Solved by' points to a "
                "non-existing requirement '%s'" % dep, req.get_id()))
            return False
        # It is not allowed to have self-references: it does not
        # make any sense that a requirement references itself.
        if dep == req.get_id():
            logger.error(LogFormatter.format(
                75, "'Solved by' points to the "
                "requirement itself", req.get_id()))
            return False
        # Mark down the depends on...
        dep_req = self.__requirements[dep]
        # This is exactly the other way round as used in the 'Depends on'.
        tracer.debug("Add edge [%s] -> [%s]", dep_req.get_id(),
                     req.get_id())
        Digraph.create_edge(req, dep_req)
    # Delete the original tag.
    del req.brmo["Solved by"]
    return True
def __resolve_depends_on_one_req_impl(self, req):
    tag_content = req.brmo["Depends on"]
    # If available, it must not be empty.
    if not tag_content.get_content():
        print("+++ ERROR %s: 'Depends on' field has len 0"
              % (req.get_id()))
        return False
    # Step through the list.
    tag_content_split = tag_content.get_content().split()
    for split_tag in tag_content_split:
        if split_tag not in self.get_all_requirement_ids():
            logger.error(LogFormatter.format(
                47, "'Depends on' points to a "
                "non-existing requirement '%s'" % split_tag,
                req.get_id()))
            return False
        # It is not allowed to have self-references: it does not
        # make any sense that a requirement references itself.
        if split_tag == req.get_id():
            logger.error(LogFormatter.format(
                59, "'Depends on' points to the "
                "requirement itself", req.get_id()))
            return False
        # Mark down the depends on...
        dep_req = self.__requirements[split_tag]
        # This is exactly the other way round as used in the 'Depends on'.
        tracer.debug("Add edge [%s] -> [%s]", dep_req.get_id(),
                     req.get_id())
        Digraph.create_edge(dep_req, req)
    # Delete the original tag.
    del req.brmo["Depends on"]
    return True
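# The two resolvers above build one dependency graph from two tag
# directions.  Judging from the Digraph.create_edge argument order,
# 'Depends on' adds an edge from the named dependency to the requirement
# carrying the tag, while 'Solved by' adds an edge from the requirement
# carrying the tag to the node that solves it - so both tag forms end up
# describing the same link.  A toy, standalone sketch of that orientation
# with a plain adjacency dict (hypothetical names, not the real Digraph
# API):
from collections import defaultdict

def build_edges(depends_on, solved_by):
    edges = defaultdict(set)
    for req, deps in depends_on.items():
        for dep in deps:
            edges[dep].add(req)       # dependency -> requirement
    for req, solvers in solved_by.items():
        for solver in solvers:
            edges[req].add(solver)    # requirement -> solving node
    return edges

# Example: "B depends on A" and "A is solved by B" yield the same edge.
assert build_edges({"B": ["A"]}, {}) == {"A": {"B"}}
assert build_edges({}, {"A": ["B"]}) == {"A": {"B"}}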
def xml_check_children(xml_doc_a, xml_doc_b, xpath):
    '''Create a shallow copy of b's children (and account for nodes
       which are seen as equal).'''
    bcn = []
    for child in xml_doc_b.childNodes:
        bcn.append(copy.copy(child))
    # Iterate through the child nodes of 'a' ...
    bcn_found_cnt = len(bcn)
    for a_children in xml_doc_a.childNodes:
        tracer.debug(LogFormatter.format(
            97, "xmlcmp: comparing child node [%s]" % a_children))
        # ... check if there is the same one in 'b'
        found_ac = False
        for b_children in bcn:
            result, err_msg = xmlequals(a_children, b_children,
                                        xpath + "/" + xml_doc_a.tagName)
            if result:
                # a_children and b_children are equal: decrement the
                # counter of unmatched nodes and skip to the next
                # a_children
                bcn_found_cnt -= 1
                found_ac = True
                tracer.debug(LogFormatter.format(
                    98, "[%s] xmlcmp: found equal subtrees [%s]"
                    % (xpath, a_children)))
                tracer.debug(LogFormatter.format(
                    99, "[%s] xmlcmp: remaining elements [%s]"
                    % (xpath, bcn)))
                break
        if not found_ac:
            return False, "Child node [%s] not found at [%s] - " \
                "last error was [%s]" % (a_children, xpath, err_msg)
    assert bcn_found_cnt == 0
    return True, None
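# Both xml_check_children variants above implement an order-insensitive
# comparison: every child of 'a' must have one matching, not-yet-consumed
# counterpart among the children of 'b'.  A minimal standalone sketch of
# that matching idea, using plain lists and a caller-supplied equals()
# predicate instead of the real xmlequals()/minidom nodes (names are
# illustrative only):
def unordered_children_match(a_children, b_children, equals):
    # Work on a copy so that matched 'b' entries can be consumed.
    remaining = list(b_children)
    for a_child in a_children:
        for b_child in remaining:
            if equals(a_child, b_child):
                remaining.remove(b_child)  # consume the matched counterpart
                break
        else:
            return False  # no counterpart found for this 'a' child
    return not remaining  # equal only if nothing in 'b' is left over

# Example: order does not matter, but multiplicity does.
assert unordered_children_match([1, 2, 2], [2, 1, 2], lambda x, y: x == y)
assert not unordered_children_match([1, 2], [2, 2], lambda x, y: x == y)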
def __resolve_depends_on_one_req(self, req_node, also_solved_by):
    tracer.debug("Called.")
    req = req_node.get_requirement()
    if req.get_value("Type") == Requirement.rt_master_requirement:
        # There must be no 'Depends on'.
        if "Depends on" in req.brmo:
            print("+++ ERROR %s: initial requirement has "
                  "Depends on field." % (req.id))
            return False
        # It itself does not have any 'depends on' nodes.
        req.graph_depends_on = None
        # This is the master!
        return True
    # For all other requirement types there must be a 'Depends on'.
    if "Depends on" not in req.brmo:
        if also_solved_by:
            # Skip handling this requirement.
            return True
        print("+++ ERROR %s: non-initial requirement has "
              "no 'Depends on' field." % (req.id))
        return False
    t = req.brmo["Depends on"]
    # If available, it must not be empty.
    if len(t.get_content()) == 0:
        print("+++ ERROR %s: 'Depends on' field has len 0" % (req.id))
        return False
    # Step through the list.
    tl = t.get_content().split()
    for ts in tl:
        if ts not in self.get_all_requirement_ids():
            logger.error(
                LogFormatter.format(
                    47, "'Depends on' points to a "
                    "non-existing requirement '%s'" % ts, req.id))
            return False
        # It is not allowed to have self-references: it does not
        # make any sense that a requirement references itself.
        if ts == req.id:
            logger.error(
                LogFormatter.format(
                    59, "'Depends on' points to the "
                    "requirement itself", req.id))
            return False
        # Mark down the depends on...
        dep_req_node = self._named_nodes[ts]
        # This is exactly the other way round as used in the 'Depends on'.
        tracer.debug(
            "Add edge [%s] -> [%s]"
            % (dep_req_node.get_requirement().get_id(), req.get_id()))
        Digraph.create_edge(self, dep_req_node, req_node)
    # Copy and delete the original tag.
    ## XXX Not needed any more?
    req.tags["Depends on"] = t.split()
    del req.brmo["Depends on"]
    return True
def handle_modules_tag(self, reqs):
    if self.mods == None:
        return
    for modkey, module in self.mods.get_tagtype(self.tbhtags).items():
        try:
            tracer.debug("handle modules tag modkey [%s] tagtype [%s]"
                         % (modkey, self.tbhtags))
            if self.tbhtags not in module.get_type_set():
                logger.error(LogFormatter.format(
                    90, "Wrong module type [%s] not in [%s]"
                    % (self.tbhtags, module.get_type_set())))
                continue
            key, value = module.rewrite(self.id, reqs)
            # Check whether the current key is already present
            # in the map.
            if key in self.values:
                logger.error(LogFormatter.format(
                    54, "tag [%s] already defined" % (key), self.id))
                self._set_not_usable()
                # Also continue to get possible further error
                # messages.
            self.values[key] = value
        except RMTException, rmte:
            # Some semantic error occurred: do not interpret key or
            # value.
            logger.error(LogFormatter.rmte(rmte))
            logger.error(LogFormatter.format(
                41, "semantic error occurred in "
                "module [%s]" % modkey, self.id))
            self._set_not_usable()
def __resolve_depends_on_one_req(self, req_node, also_solved_by):
    tracer.debug("Called.")
    req = req_node.get_requirement()
    if req.get_value("Type") == Requirement.rt_master_requirement:
        # There must be no 'Depends on'.
        if "Depends on" in req.brmo:
            print("+++ ERROR %s: initial requirement has "
                  "Depends on field." % (req.id))
            return False
        # It itself does not have any 'depends on' nodes.
        req.graph_depends_on = None
        # This is the master!
        return True
    # For all other requirement types there must be a 'Depends on'.
    if "Depends on" not in req.brmo:
        if also_solved_by:
            # Skip handling this requirement.
            return True
        print("+++ ERROR %s: non-initial requirement has "
              "no 'Depends on' field." % (req.id))
        return False
    t = req.brmo["Depends on"]
    # If available, it must not be empty.
    if len(t.get_content()) == 0:
        print("+++ ERROR %s: 'Depends on' field has len 0" % (req.id))
        return False
    # Step through the list.
    tl = t.get_content().split()
    for ts in tl:
        if ts not in self.get_all_requirement_ids():
            logger.error(LogFormatter.format(
                47, "'Depends on' points to a "
                "non-existing requirement '%s'" % ts, req.id))
            return False
        # It is not allowed to have self-references: it does not
        # make any sense that a requirement references itself.
        if ts == req.id:
            logger.error(LogFormatter.format(
                59, "'Depends on' points to the "
                "requirement itself", req.id))
            return False
        # Mark down the depends on...
        dep_req_node = self._named_nodes[ts]
        # This is exactly the other way round as used in the 'Depends on'.
        tracer.debug("Add edge [%s] -> [%s]"
                     % (dep_req_node.get_requirement().get_id(),
                        req.get_id()))
        Digraph.create_edge(self, dep_req_node, req_node)
    # Copy and delete the original tag.
    ## XXX Not needed any more?
    req.tags["Depends on"] = t.split()
    del req.brmo["Depends on"]
    return True
def __resolve_solved_by_one_req(self, req_node):
    '''Resolve the 'Solved by' for one requirement.'''
    assert isinstance(req_node, RequirementDNode)
    req = req_node.get_requirement()
    tracer.debug("Called: requirement id [%s]." % req.get_id())
    # It is a 'normal' case when there is no 'Solved by' (until now).
    if "Solved by" not in req.brmo:
        return True
    content = req.brmo["Solved by"].get_content()
    # If available, it must not be empty.
    if len(content) == 0:
        logger.error(
            LogFormatter.format(77, "'Solved by' field has length 0",
                                req.get_id()))
        return False
    # Step through the list.
    dep_list = content.split()
    tracer.debug("dependent list [%s]" % dep_list)
    for dep in dep_list:
        if dep not in self._named_nodes:
            logger.error(
                LogFormatter.format(
                    74, "'Solved by' points to a "
                    "non-existing requirement '%s'" % dep, req.get_id()))
            return False
        # It is not allowed to have self-references: it does not
        # make any sense that a requirement references itself.
        if dep == req.get_id():
            logger.error(
                LogFormatter.format(
                    75, "'Solved by' points to the "
                    "requirement itself", req.id))
            return False
        # Mark down the depends on...
        dep_req_node = self._named_nodes[dep]
        assert isinstance(dep_req_node, RequirementDNode)
        # This is exactly the other way round as used in the 'Depends on'.
        tracer.debug(
            "Add edge [%s] -> [%s]"
            % (dep_req_node.get_requirement().get_id(), req.get_id()))
        Digraph.create_edge(self, req_node, dep_req_node)
    # Delete the original tag.
    del req.brmo["Solved by"]
    return True
def split_entries(split_lines, rid, mls, lineno_offset):
    '''This method splits up the given string list into separate entries,
       each of which represents an entry record.
       The lineno_offset is the line number of the first line given
       in the split_lines array.
    '''
    doc = []
    lineno = lineno_offset
    success = True
    while split_lines:
        try:
            next_record = TxtParser.split_next_record(
                split_lines, rid, lineno, mls)
            doc.append(next_record)
            lineno += len(next_record[1]) + len(next_record[2])
        except RMTException as rmte:
            # This is a hint that the tag line could not be correctly
            # parsed.
            logger.error(LogFormatter.rmte(rmte))
            # Remove the erroneous line.
            del split_lines[0]
            lineno += 1
            success = False
    return success, doc
def __read_one_testcase(self, fileinfo, input_mods, object_cache):
    '''Read in one testcase from the file info.'''
    tracer.debug("Called.")
    # Check for correct filename
    if not fileinfo.get_filename().endswith(".tec"):
        tracer.info("skipping file [%s]" % fileinfo.get_filename())
        return
    # Handle caching.
    vcs_id = fileinfo.get_vcs_id()
    rid = fileinfo.get_filename_sub_part()[:-4]
    testcase = object_cache.get("TestCase", vcs_id)
    tracer.info("Reading testcase [%s]" % rid)
    if testcase == None:
        file_content = fileinfo.get_content()
        testcase = TestCase(file_content, rid, fileinfo.get_filename(),
                            input_mods, self._config)
        # Add the testcase to the cache.
        object_cache.add(vcs_id, "TestCase", testcase)
    self._adapt_usablility(testcase)
    if testcase.is_usable():
        # Store in the map, so that it is easy to access the
        # node by id.
        self._add_testcase(testcase)
        # Also store it in the digraph's node list for simple
        # access to the digraph algorithms.
        # self.nodes.append(req)
    else:
        logger.error(LogFormatter.format(
            115, "could not be parsed", testcase.id))
    tracer.debug("Finished.")
def __read_one_requirement(self, fileinfo, input_mods, object_cache):
    '''Read in one requirement from the file info.'''
    tracer.debug("Called.")
    # Check for correct filename
    if not fileinfo.get_filename().endswith(".req"):
        tracer.info("skipping file [%s]" % fileinfo.get_filename())
        return
    # Handle caching.
    vcs_id = fileinfo.get_vcs_id()
    rid = fileinfo.get_filename_sub_part()[:-4]
    req = object_cache.get("Requirement", vcs_id)
    tracer.info("Reading requirement [%s]" % rid)
    if req == None:
        file_content = fileinfo.get_content()
        req = Requirement(file_content, rid, fileinfo.get_filename(),
                          input_mods, self._config)
        # Add the requirement to the cache.
        object_cache.add(vcs_id, "Requirement", req)
    self._adapt_usablility(req)
    if req.is_usable():
        dnreq = RequirementDNode(req)
        # Store in the map, so that it is easy to access the
        # node by id.
        ### ToDo: needed
        self._add_requirement(req)
        self.add_node(dnreq)
        # Also store it in the digraph's node list for simple
        # access to the digraph algorithms.
        # self.nodes.append(req)
    else:
        logger.error(LogFormatter.format(
            45, "could not be parsed", req.id))
    tracer.debug("Finished.")
def __read_one_requirement(self, fileinfo, input_mods, object_cache):
    '''Read in one requirement from the file info.'''
    tracer.debug("Called.")
    # Check for correct filename
    if not fileinfo.get_filename().endswith(".req"):
        tracer.info("skipping file [%s]", fileinfo.get_filename())
        return
    # Handle caching.
    vcs_id = fileinfo.get_vcs_id()
    rid = fileinfo.get_filename_sub_part()[:-4]
    req = object_cache.get("Requirement", vcs_id)
    tracer.info("Reading requirement [%s]", rid)
    if req is None:
        file_content = fileinfo.get_content()
        req = Requirement(file_content, rid, fileinfo.get_filename(),
                          input_mods, self._config)
        # Add the requirement to the cache.
        object_cache.add(vcs_id, "Requirement", req)
    self._adapt_usablility(req)
    if req.is_usable():
        # Store in the map, so that it is easy to access the
        # node by id.
        self.add_requirement(req)
        # Also store it in the digraph's node list for simple
        # access to the digraph algorithms.
        # self.nodes.append(req)
    else:
        logger.error(LogFormatter.format(
            45, "could not be parsed", req.get_id()))
    tracer.debug("Finished.")
def __resolve_solved_by_one_req(self, req_node):
    '''Resolve the 'Solved by' for one requirement.'''
    assert isinstance(req_node, RequirementDNode)
    req = req_node.get_requirement()
    tracer.debug("Called: requirement id [%s]." % req.get_id())
    # It is a 'normal' case when there is no 'Solved by' (until now).
    if "Solved by" not in req.brmo:
        return True
    content = req.brmo["Solved by"].get_content()
    # If available, it must not be empty.
    if len(content) == 0:
        logger.error(LogFormatter.format(
            77, "'Solved by' field has length 0", req.get_id()))
        return False
    # Step through the list.
    dep_list = content.split()
    tracer.debug("dependent list [%s]" % dep_list)
    for dep in dep_list:
        if dep not in self._named_nodes:
            logger.error(LogFormatter.format(
                74, "'Solved by' points to a "
                "non-existing requirement '%s'" % dep, req.get_id()))
            return False
        # It is not allowed to have self-references: it does not
        # make any sense that a requirement references itself.
        if dep == req.get_id():
            logger.error(LogFormatter.format(
                75, "'Solved by' points to the "
                "requirement itself", req.id))
            return False
        # Mark down the depends on...
        dep_req_node = self._named_nodes[dep]
        assert isinstance(dep_req_node, RequirementDNode)
        # This is exactly the other way round as used in the 'Depends on'.
        tracer.debug("Add edge [%s] -> [%s]"
                     % (dep_req_node.get_requirement().get_id(),
                        req.get_id()))
        Digraph.create_edge(self, req_node, dep_req_node)
    # Delete the original tag.
    del req.brmo["Solved by"]
    return True
def __all_tags_handled(self):
    '''Returns true iff all the different tags are handled.'''
    all_handled = True
    for req in self.nodes:
        if req.brmo:
            logger.error(LogFormatter.format(
                57, "No tag handler found for tag(s) '%s' "
                "- Hint: typo in tag(s)?"
                % json.dumps(list(req.brmo.keys())), req.get_id()))
            all_handled = False
    return all_handled
def __all_tags_handled(self):
    '''Returns true iff all the different tags are handled.'''
    all_handled = True
    for req_node in self.get_iter_nodes_values():
        req = req_node.get_requirement()
        if len(req.brmo) > 0:
            logger.error(LogFormatter.format(
                57, "No tag handler found for tag(s) '%s' "
                "- Hint: typo in tag(s)?" % req.brmo.keys(),
                req.get_id()))
            all_handled = False
    return all_handled
def check_line_length(self, sl, rid):
    max_line_length = self.tioconfig.get_max_line_length()
    lineno = 0
    for l in sl:
        lineno += 1
        if len(l) > max_line_length:
            logger.error(
                LogFormatter.format(
                    80, "line too long: is [%d], "
                    "max allowed [%d]" % (len(l), max_line_length),
                    rid, lineno))
            self._set_not_usable()
def __all_tags_handled(self):
    '''Returns true iff all the different tags are handled.'''
    all_handled = True
    for req_node in self.get_iter_nodes_values():
        req = req_node.get_requirement()
        if len(req.brmo) > 0:
            logger.error(
                LogFormatter.format(
                    57, "No tag handler found for tag(s) '%s' "
                    "- Hint: typo in tag(s)?" % req.brmo.keys(),
                    req.get_id()))
            all_handled = False
    return all_handled
def check_line_length(self, split_lines, rid):
    """Check if the given lines are too long (or not)"""
    max_line_length = self.tioconfig.get_max_line_length()
    lineno = 0
    for line in split_lines:
        lineno += 1
        if len(line) > max_line_length:
            logger.error(
                LogFormatter.format(
                    80, "line too long: is [%d], "
                    "max allowed [%d]" % (len(line), max_line_length),
                    rid, lineno))
            self._set_not_usable()
def __read_one_constraint(self, fileinfo, input_mods, object_cache):
    '''Read in one constraint from the file info.'''
    result = self.__read_one_element(fileinfo, input_mods, object_cache,
                                     ".ctr", "Constraint")
    if result is None:
        return
    if result.is_usable():
        # Store in the map, so that it is easy to access the
        # node by id.
        self.add_constraint(result)
        # Also store it in the digraph's node list for simple
        # access to the digraph algorithms.
        # self.nodes.append(req)
    else:
        logger.error(LogFormatter.format(87, "could not be parsed",
                                         result.get_id()))
def split_next_record(split_lines, rid, lineno, _mls):
    '''Splits off the first record from the given string list.
       The record is returned and the string list is shortened.
       Precondition: it can be assumed that len(split_lines) > 0.
    '''
    i = 0
    sl_len = len(split_lines)
    # The first line must contain the tag.
    retl = TxtParser.re_tag_line.match(split_lines[i])
    if not retl:
        raise RMTException(79, "Expected tag line not found", rid, lineno)
    content = []
    comment = []
    # Split first line: the tag is everything including the ':';
    # the content starts directly after this ':'.
    tag = retl.group(1)
    content.append(retl.group(2))
    i += 1
    # This is what is needed - to be compatible with the old
    # specification.
    while i < sl_len:
        if TxtParser.re_tag_line.match(split_lines[i]):
            break
        elif split_lines[i] and split_lines[i][0] == " ":
            content.append(split_lines[i])
            if comment:
                # This is the possibly problematic case where
                # continuation lines are intermixed with comments.
                logger.info(LogFormatter.format(
                    80, TxtParser.comment_in_req, rid, lineno+i))
        elif TxtParser.is_comment_or_empty(split_lines[i]):
            comment.append(split_lines[i])
        i += 1
    rec = [tag, content, comment]
    del split_lines[0:i]
    return rec
def split_next_record(split_lines, rid, lineno, _mls):
    '''Splits off the first record from the given string list.
       The record is returned and the string list is shortened.
       Precondition: it can be assumed that len(split_lines) > 0.
    '''
    i = 0
    sl_len = len(split_lines)
    # The first line must contain the tag.
    retl = TxtParser.re_tag_line.match(split_lines[i])
    if not retl:
        raise RMTException(79, "Expected tag line not found", rid, lineno)
    content = []
    comment = []
    # Split first line: the tag is everything including the ':';
    # the content starts directly after this ':'.
    tag = retl.group(1)
    content.append(retl.group(2))
    i += 1
    # This is what is needed - to be compatible with the old
    # specification.
    while i < sl_len:
        if TxtParser.re_tag_line.match(split_lines[i]):
            break
        elif split_lines[i] and split_lines[i][0] == " ":
            content.append(split_lines[i])
            if comment:
                # This is the possibly problematic case where
                # continuation lines are intermixed with comments.
                logger.info(
                    LogFormatter.format(80, TxtParser.comment_in_req,
                                        rid, lineno + i))
        elif TxtParser.is_comment_or_empty(split_lines[i]):
            comment.append(split_lines[i])
        i += 1
    rec = [tag, content, comment]
    del split_lines[0:i]
    return rec
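# split_next_record consumes one "Tag: value" record: a tag line starts the
# record, continuation lines are indented with a space, and comment or empty
# lines are collected separately until the next tag line.  A minimal
# standalone sketch of that record grammar, with a simplified tag-line regex
# (illustrative only, not the real TxtParser.re_tag_line):
import re

_TAG_LINE = re.compile(r"^([A-Za-z][A-Za-z0-9 ]*):\s?(.*)$")

def split_record_sketch(lines):
    '''Split off the first record; shortens the given list in place.'''
    match = _TAG_LINE.match(lines[0])
    if not match:
        raise ValueError("Expected tag line not found")
    tag, content, comment = match.group(1), [match.group(2)], []
    i = 1
    while i < len(lines):
        if _TAG_LINE.match(lines[i]):
            break                      # next record starts here
        elif lines[i].startswith(" "):
            content.append(lines[i])   # continuation line
        elif not lines[i] or lines[i].startswith("#"):
            comment.append(lines[i])   # comment or empty line
        i += 1
    del lines[0:i]
    return [tag, content, comment]

# Example usage:
lines = ["Name: first requirement", " continued", "# a comment", "Topic: x"]
assert split_record_sketch(lines) == \
    ["Name", ["first requirement", " continued"], ["# a comment"]]
assert lines == ["Topic: x"]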
def internal_convert_to_new(cfg, old_config):
    '''Converts the given old_config object to the new configuration
       using a dictionary.'''
    cfg.set_value('requirements', {})
    # This is done only for housekeeping.
    old_config_dir = dir(old_config)
    # Remove the system-specific entries from the list.
    old_config_dir.remove('__doc__')
    old_config_dir.remove('__module__')
    if hasattr(old_config, 'stakeholders'):
        cfg.set_value('requirements.stakeholders',
                      old_config.stakeholders)
        old_config_dir.remove('stakeholders')
    if hasattr(old_config, 'inventors'):
        cfg.set_value('requirements.inventors', old_config.inventors)
        old_config_dir.remove('inventors')
    # Topic specs must be done before the output_spec, because the
    # output specs will be inserted into the topic specs.
    if hasattr(old_config, 'topic_specs'):
        Old.internal_convert_topics(cfg, old_config.topic_specs)
        old_config_dir.remove('topic_specs')
    if hasattr(old_config, 'output_specs'):
        Old.internal_convert_output(cfg, old_config.output_specs)
        old_config_dir.remove('output_specs')
    if hasattr(old_config, 'reqs_spec'):
        Old.internal_convert_reqs(cfg, old_config.reqs_spec)
        old_config_dir.remove('reqs_spec')
    if hasattr(old_config, 'analytics_specs'):
        Old.internal_convert_analytics(cfg, old_config.analytics_specs)
        old_config_dir.remove('analytics_specs')
    if hasattr(old_config, 'constraints_specs'):
        Old.internal_convert_constraints(cfg, old_config.constraints_specs)
        old_config_dir.remove('constraints_specs')
    if len(old_config_dir) > 0:
        logger.warning(
            LogFormatter.format(
                100, "Old Configuration: "
                "Not converted attributes: [%s]" % old_config_dir))
def internal_convert_to_new(cfg, old_config):
    '''Converts the given old_config object to the new configuration
       using a dictionary.'''
    cfg.set_value('requirements', {})
    # This is done only for housekeeping.
    old_config_dir = dir(old_config)
    # Remove the system-specific entries from the list.
    old_config_dir.remove('__doc__')
    old_config_dir.remove('__module__')
    if hasattr(old_config, 'stakeholders'):
        cfg.set_value('requirements.stakeholders',
                      old_config.stakeholders)
        old_config_dir.remove('stakeholders')
    if hasattr(old_config, 'inventors'):
        cfg.set_value('requirements.inventors', old_config.inventors)
        old_config_dir.remove('inventors')
    # Topic specs must be done before the output_spec, because the
    # output specs will be inserted into the topic specs.
    if hasattr(old_config, 'topic_specs'):
        Old.internal_convert_topics(cfg, old_config.topic_specs)
        old_config_dir.remove('topic_specs')
    if hasattr(old_config, 'output_specs'):
        Old.internal_convert_output(cfg, old_config.output_specs)
        old_config_dir.remove('output_specs')
    if hasattr(old_config, 'reqs_spec'):
        Old.internal_convert_reqs(cfg, old_config.reqs_spec)
        old_config_dir.remove('reqs_spec')
    if hasattr(old_config, 'analytics_specs'):
        Old.internal_convert_analytics(cfg, old_config.analytics_specs)
        old_config_dir.remove('analytics_specs')
    if hasattr(old_config, 'constraints_specs'):
        Old.internal_convert_constraints(cfg, old_config.constraints_specs)
        old_config_dir.remove('constraints_specs')
    if len(old_config_dir) > 0:
        logger.warning(LogFormatter.format(
            100, "Old Configuration: "
            "Not converted attributes: [%s]" % old_config_dir))