def __output_latex_one_constraint(self, cname, cnstrt):
    '''Output one constraint.'''
    cname = latex2.__strescape(cname)
    tracer.debug("Output constraint [%s]." % cname)
    self.__fd.write(u"%% CONSTRAINT '%s'\n" % cname)
    self.__fd.write(u"\\%s{%s}\\label{CONSTRAINT%s}\n"
                    "\\textbf{Description:} %s\n"
                    % (self.level_names[1],
                       cnstrt.get_value("Name").get_content(),
                       cname,
                       cnstrt.get_value("Description").get_content()))
    if cnstrt.is_val_av_and_not_null("Rationale"):
        self.__fd.write(u"\n\\textbf{Rationale:} %s\n"
                        % cnstrt.get_value("Rationale").get_content())
    if cnstrt.is_val_av_and_not_null("Note"):
        self.__fd.write(u"\n\\textbf{Note:} %s\n"
                        % cnstrt.get_value("Note").get_content())
    # Write out the references to the requirements.
    reqs_refs = []
    for req in self.__constraints_reqs_ref[cname]:
        refid = latex2.__strescape(req)
        refctr = "\\ref{%s} \\nameref{%s}" % (refid, refid)
        reqs_refs.append(refctr)
    self.__fd.write(u"\n\\textbf{Requirements:} %s\n" % ", ".join(reqs_refs))
    tracer.debug("Finished.")

def __output_latex_one_testcase(self, cname, cnstrt):
    '''Output one testcase.'''
    cname = latex2.__strescape(cname)
    tracer.debug("Output testcase [%s]." % cname)
    self.__fd.write(u"%% TEST-CASE '%s'\n" % cname)
    self.__fd.write(u"\\%s{%s}\\label{TESTCASE%s}\n"
                    "\\textbf{Description:} %s\n"
                    % (self.level_names[1],
                       cnstrt.get_value("Name").get_content(),
                       cname,
                       cnstrt.get_value("Description").get_content()))
    if cnstrt.is_val_av_and_not_null("Expected Result"):
        self.__fd.write(u"\n\\textbf{Expected Result:} %s\n"
                        % cnstrt.get_value("Expected Result").get_content())
    if cnstrt.is_val_av_and_not_null("Rationale"):
        self.__fd.write(u"\n\\textbf{Rationale:} %s\n"
                        % cnstrt.get_value("Rationale").get_content())
    if cnstrt.is_val_av_and_not_null("Note"):
        self.__fd.write(u"\n\\textbf{Note:} %s\n"
                        % cnstrt.get_value("Note").get_content())
    tracer.debug("Finished.")

def rewrite(reqset): """Do a DFS and compute the priority during that way. If there is a node which was already visited, only recompute the subtree, if the new priority is higher. """ tracer.debug("Called.") def handle_priorization(node, inc_weight): """The second argument (the number) is the weight of the outgoing edge. """ tracer.debug("Node [%s] inc_weight [%4.3f]", node.get_id(), inc_weight) # This is the weight which is inherited weight = inc_weight * node.get_value("Factor") # If there is none, or if the current priority is lower # that the newly computed, recompute this node and # everything beneath. if not node.is_value_available("Priority") \ or node.get_value("Priority") < weight: tracer.debug("Node [%s] set priority to [%4.3f]", node.get_id(), weight) node.set_value("Priority", weight) for out_node in node.outgoing: tracer.debug( "Recursive call to node [%s] with weight [%4.3f]", out_node.get_id(), weight) handle_priorization(out_node, weight) # Start at the root (master) node and evaluate all nodes # there. for req in reqset.get_master_nodes(): handle_priorization(req, 1.0)
def __init__(self, config): '''Creates the output module handler.''' tracer.debug("Called.") self.__config = config self.__cmad_file = None # Some output statistics are collected here. self.__ostats = []
def __create_output_module(self, output_name):
    '''Creates the module object.'''
    tracer.debug("Creating output module [%s]" % output_name)
    # pylint: disable=W0612
    output_module = self.__load_output_module(output_name)
    # Create the constructor object.
    return eval("output_module.%s" % output_name)

def handle_modules_tag(self, reqs):
    if self.mods is None:
        return
    for modkey, module in self.mods.get_tagtype(self.tbhtags).items():
        try:
            tracer.debug("handle modules tag modkey [%s] tagtype [%s]"
                         % (modkey, self.tbhtags))
            if self.tbhtags not in module.get_type_set():
                logger.error(LogFormatter.format(
                    90, "Wrong module type [%s] not in [%s]"
                    % (self.tbhtags, module.get_type_set())))
                continue
            key, value = module.rewrite(self.id, reqs)
            # Check if there is already an entry with the current key
            # in the map.
            if key in self.values:
                logger.error(LogFormatter.format(
                    54, "tag [%s] already defined" % key, self.id))
                self._set_not_usable()
                # Also continue to get possible further error messages.
            self.values[key] = value
        except RMTException, rmte:
            # A semantic error occurred: do not interpret key or value.
            logger.error(LogFormatter.rmte(rmte))
            logger.error(LogFormatter.format(
                41, "semantic error occurred in module [%s]" % modkey,
                self.id))
            self._set_not_usable()

def __resolve_solved_by_one_req_deps(self, req):
    content = req.brmo["Solved by"].get_content()
    # If available, it must not be empty.
    if not content:
        logger.error(LogFormatter.format(
            77, "'Solved by' field has length 0", req.get_id()))
        return False
    # Step through the list.
    dep_list = content.split()
    tracer.debug("dependent list [%s]", dep_list)
    for dep in dep_list:
        if dep not in self.__requirements:
            logger.error(LogFormatter.format(
                74, "'Solved by' points to a "
                "non-existing requirement '%s'" % dep, req.get_id()))
            return False
        # Self-references are not allowed: it does not make any sense
        # for a requirement to reference itself.
        if dep == req.get_id():
            logger.error(LogFormatter.format(
                75, "'Solved by' points to the "
                "requirement itself", req.get_id()))
            return False
        # Mark down the dependency...
        dep_req = self.__requirements[dep]
        # This is exactly the other way around than in 'Depends on'.
        tracer.debug("Add edge [%s] -> [%s]", dep_req.get_id(), req.get_id())
        Digraph.create_edge(req, dep_req)
    # Delete the original tag.
    del req.brmo["Solved by"]
    return True

def __init__(self, oconfig): '''Create a graph output object.''' tracer.debug("Called.") StdOutputParams.__init__(self, oconfig) CreateMakeDependencies.__init__(self) self.__ofile = None tracer.debug("Finished.")
def __init__(self, oconfig): """Create a req statistics object.""" tracer.debug("Called.") StdOutputParams.__init__(self, oconfig) CreateMakeDependencies.__init__(self) self.__ofile = None tracer.debug("Finished.")
def __read(self, tname, input_handler, commit, file_info, req_set):
    '''Read in the topic and create all the tags.'''
    Encoding.check_unicode(tname)
    self.__tags = TxtRecord.from_string(
        file_info.get_content(), tname, input_handler.get_txt_io_config())
    for tag in self.__tags:
        # If the topic has subtopics, read them in as well.
        if tag.get_tag() == "SubTopic":
            lfile_info = input_handler.get_file_info_with_type(
                commit, "topics", tag.get_content() + ".tic")
            ntopic = Topic(self.__digraph, self._config, input_handler,
                           commit, lfile_info, req_set)
            self.__digraph.add_node(ntopic)
            Digraph.create_edge(self, ntopic)
        elif tag.get_tag() == "Name":
            if self.__topic_name is not None:
                # There can (currently) be only one name.
                assert False
            self.__topic_name = tag.get_content()
        elif tag.get_tag() == "IncludeRequirements":
            if tag.get_content() != "full":
                raise RMTException(113, "IncludeRequirements value not "
                                   "supported [%s]" % tag.get_content(),
                                   self.name)
            self.__requirements = req_set.restrict_to_topics(tname)
            tracer.debug("Found [%d] requirements for topic [%s]",
                         self.__requirements.get_requirements_cnt(), tname)
    # Check for the existence of the name.
    if self.__topic_name is None:
        raise RMTException(62, "Mandatory tag 'Name' not given in topic",
                           self.name)

def __read(self, tname, input_handler, commit, file_info, req_set): """Read in the topic and create all the tags.""" self.__tags = TxtRecord.from_string(file_info.get_content(), tname, input_handler.get_txt_io_config()) for tag in self.__tags: # If the topic has subtopics, read them also in. if tag.get_tag() == "SubTopic": lfile_info = input_handler.get_file_info_with_type(commit, "topics", tag.get_content() + ".tic") ntopic = Topic(self.__topicset, self._config, input_handler, commit, lfile_info, req_set) # The topic itself is already added in the constrcutor of Topic. # Therefore there is no need to add it here (again). # self.__topicset.add_node(ntopic) self.__topicset.create_edge(self, ntopic) elif tag.get_tag() == "Name": if self.__topic_name != None: # TODO: Multiple Names assert False self.__topic_name = tag.get_content() elif tag.get_tag() == "IncludeRequirements": if tag.get_content() != "full": raise RMTException( 113, "IncludeRequirements value not " "supported [%s]" % tag.get_content(), self.get_name() ) self.__requirements = req_set.restrict_to_topics(tname) tracer.debug( "Found [%d] requirements for topic [%s]." % (self.__requirements.get_requirements_cnt(), tname) ) # Check for the existence of the name if self.__topic_name == None: raise RMTException(62, "Mandatory tag 'Name' not given in topic", self.get_name())
def topic_set_post(self, _):
    '''Finish major entry and close file.'''
    tracer.debug("Called.")
    self.__output_file.write(self.__req_dep_graph)
    self.__output_file.write("}\n")
    self.__output_file.close()
    tracer.debug("Finished.")

def __resolve_depends_on_one_req_impl(self, req):
    tag_content = req.brmo["Depends on"]
    # If available, it must not be empty.
    if not tag_content.get_content():
        print("+++ ERROR %s: 'Depends on' field has len 0" % req.get_id())
        return False
    # Step through the list.
    tag_content_split = tag_content.get_content().split()
    for split_tag in tag_content_split:
        if split_tag not in self.get_all_requirement_ids():
            logger.error(LogFormatter.format(
                47, "'Depends on' points to a "
                "non-existing requirement '%s'" % split_tag, req.get_id()))
            return False
        # Self-references are not allowed: it does not make any sense
        # for a requirement to reference itself.
        if split_tag == req.get_id():
            logger.error(LogFormatter.format(
                59, "'Depends on' points to the "
                "requirement itself", req.get_id()))
            return False
        # Mark down the dependency...
        dep_req = self.__requirements[split_tag]
        # Note the edge direction: from the dependency to this requirement.
        tracer.debug("Add edge [%s] -> [%s]", dep_req.get_id(), req.get_id())
        Digraph.create_edge(dep_req, req)
    # Delete the original tag.
    del req.brmo["Depends on"]
    return True

def topic_post(self, _topic):
    '''This is called in the Topic post-phase.'''
    # Add the xml_task to the current document.
    xml_task = self.__xml_obj_stack.pop()
    self.__xml_obj_stack[-1].appendChild(xml_task)
    tracer.debug("Finished; xml document stack length [%s]"
                 % len(self.__xml_obj_stack))

def __create_local_ce3s(self):
    '''Create the local Constraint Execution Environments and evaluate
    the given statements.

    This method does two things:
    - evaluate the constraints in the CE3
    - reset the 'Constraints' entry in the requirement (instead of the
      TextRecord, a map from name to constraint object is stored).'''
    tracer.debug("Called.")
    for req_name, req in self._named_nodes.items():
        # In each case store a (maybe empty) CE3 in the set.
        ce3 = CE3()
        cstrnts = req.get_requirement().get_value("Constraints")
        if cstrnts is not None:
            sval = json.loads(cstrnts.get_content())
            cs = {}
            for s in sval:
                ctr_name = self.get_ctr_name(s)
                if ctr_name not in self.__constraints:
                    raise RMTException(88, "Constraint [%s] does not "
                                       "exist" % ctr_name)
                rcs = self.__constraints.get(ctr_name)
                ce3.eval(rcs, ctr_name, s)
                cs[ctr_name] = rcs
            req.get_requirement().set_value("Constraints", cs)
        # Store the freshly created CE3 into the ce3set.
        self.__ce3set.insert(req_name, ce3)
    tracer.debug("Finished. Number of constraints [%d]."
                 % self.__ce3set.length())

def topic_post(self, topic):
    '''Write out the footer and do clean ups.'''
    fd = self.__fd_stack.pop()
    self.__ul_open_stack.pop()
    self.output_html_topic_write_footer(fd)
    fd.close()
    tracer.debug("Finished: topic name [%s]" % topic.get_name())

def __read_one_requirement(self, fileinfo, input_mods, object_cache):
    '''Read in one requirement from the file info.'''
    tracer.debug("Called.")
    # Check for the correct filename suffix.
    if not fileinfo.get_filename().endswith(".req"):
        tracer.info("skipping file [%s]" % fileinfo.get_filename())
        return
    # Handle caching.
    vcs_id = fileinfo.get_vcs_id()
    rid = fileinfo.get_filename_sub_part()[:-4]
    req = object_cache.get("Requirement", vcs_id)
    tracer.info("Reading requirement [%s]" % rid)
    if req is None:
        file_content = fileinfo.get_content()
        req = Requirement(file_content, rid, fileinfo.get_filename(),
                          input_mods, self._config)
        # Add the requirement to the cache.
        object_cache.add(vcs_id, "Requirement", req)
    self._adapt_usablility(req)
    if req.is_usable():
        dnreq = RequirementDNode(req)
        # Store it in the map, so that the node can easily be accessed
        # by its id.
        # ToDo: still needed?
        self._add_requirement(req)
        self.add_node(dnreq)
        # Also store it in the digraph's node list for simple access
        # to the digraph algorithms.
        # self.nodes.append(req)
    else:
        logger.error(LogFormatter.format(
            45, "could not be parsed", req.id))
    tracer.debug("Finished.")

def cmad_topic_continuum_pre(self, _):
    '''Write out the one and only dependency to all the requirements.'''
    tracer.debug("Called.")
    CreateMakeDependencies.write_reqs_dep(self._cmad_file,
                                          self._output_filename)
    self._cmad_file.write(u"REQS_LATEX2=%s\n" % self._output_filename)

def rewrite(self, reqset):
    '''Does a DFS and computes the priority along the way.

    If a node was already visited, recompute its subtree only if the
    new priority is higher.'''
    tracer.debug("Called.")

    def handle_priorization(node, inc_weight):
        '''The second argument (the number) is the weight of the
        outgoing edge.'''
        req = node.get_requirement()
        tracer.debug("Node [%s] inc_weight [%4.3f]"
                     % (req.get_id(), inc_weight))
        # This is the weight which is inherited.
        weight = inc_weight * req.get_value("Factor")
        # If there is none, or if the current priority is lower than
        # the newly computed one, recompute this node and everything
        # beneath it.
        if not req.is_value_available("Priority") \
                or req.get_value("Priority") < weight:
            tracer.debug("Node [%s] set priority to [%4.3f]"
                         % (req.get_id(), weight))
            req.set_value("Priority", weight)
            for nout in node.get_iter_outgoing():
                tracer.debug("Recursive call to node [%s] "
                             "with weight [%4.3f]"
                             % (nout.get_requirement().get_id(), weight))
                handle_priorization(nout, weight)

    # Start at the root (master) nodes and evaluate all nodes from there.
    for req in reqset.get_master_nodes():
        handle_priorization(req, 1.0)

def __read_one_testcase(self, fileinfo, input_mods, object_cache):
    '''Read in one testcase from the file info.'''
    tracer.debug("Called.")
    # Check for the correct filename suffix.
    if not fileinfo.get_filename().endswith(".tec"):
        tracer.info("skipping file [%s]" % fileinfo.get_filename())
        return
    # Handle caching.
    vcs_id = fileinfo.get_vcs_id()
    rid = fileinfo.get_filename_sub_part()[:-4]
    testcase = object_cache.get("TestCase", vcs_id)
    tracer.info("Reading testcase [%s]" % rid)
    if testcase is None:
        file_content = fileinfo.get_content()
        testcase = TestCase(file_content, rid, fileinfo.get_filename(),
                            input_mods, self._config)
        # Add the testcase to the cache.
        object_cache.add(vcs_id, "TestCase", testcase)
    self._adapt_usablility(testcase)
    if testcase.is_usable():
        # Store it in the map, so that the node can easily be accessed
        # by its id.
        self._add_testcase(testcase)
        # Also store it in the digraph's node list for simple access
        # to the digraph algorithms.
        # self.nodes.append(req)
    else:
        logger.error(LogFormatter.format(
            115, "could not be parsed", testcase.id))
    tracer.debug("Finished.")

def __resolve_depends_on_one_req(self, req_node, also_solved_by):
    tracer.debug("Called.")
    req = req_node.get_requirement()
    if req.get_value("Type") == Requirement.rt_master_requirement:
        # There must be no 'Depends on'.
        if "Depends on" in req.brmo:
            print("+++ ERROR %s: initial requirement has "
                  "Depends on field." % req.id)
            return False
        # The master itself does not depend on any other node.
        req.graph_depends_on = None
        # This is the master!
        return True
    # For all other requirement types there must be a 'Depends on'.
    if "Depends on" not in req.brmo:
        if also_solved_by:
            # Skip handling this requirement.
            return True
        print("+++ ERROR %s: non-initial requirement has "
              "no 'Depends on' field." % req.id)
        return False
    t = req.brmo["Depends on"]
    # If available, it must not be empty.
    if len(t.get_content()) == 0:
        print("+++ ERROR %s: 'Depends on' field has len 0" % req.id)
        return False
    # Step through the list.
    tl = t.get_content().split()
    for ts in tl:
        if ts not in self.get_all_requirement_ids():
            logger.error(LogFormatter.format(
                47, "'Depends on' points to a "
                "non-existing requirement '%s'" % ts, req.id))
            return False
        # Self-references are not allowed: it does not make any sense
        # for a requirement to reference itself.
        if ts == req.id:
            logger.error(LogFormatter.format(
                59, "'Depends on' points to the "
                "requirement itself", req.id))
            return False
        # Mark down the dependency...
        dep_req_node = self._named_nodes[ts]
        # Note the edge direction: from the dependency to this requirement.
        tracer.debug("Add edge [%s] -> [%s]"
                     % (dep_req_node.get_requirement().get_id(),
                        req.get_id()))
        Digraph.create_edge(self, dep_req_node, req_node)
    # Copy and delete the original tag.
    # XXX Not needed any more?
    req.tags["Depends on"] = t.split()
    del req.brmo["Depends on"]
    return True

def get_topic_names_flattened(self):
    """Returns all the names of the complete topic hierarchy in one set."""
    tracer.debug("Called: name [%s]." % self.get_name())
    result = set()
    result.add(self.get_name())
    for topic in self.get_iter_outgoing():
        result = result.union(topic.get_topic_names_flattened())
    return result

def insert(self, name, ce3):
    '''Add a new ce3.'''
    tracer.debug("Insert ce3 for requirement [%s]", name)
    if ce3 is None:
        return
    if ce3 in self.__ce3s:
        assert False
    self.__ce3s[name] = ce3

def topic_pre(self, topic):
    '''Output one topic.

    This method is called once for each topic and subtopic.'''
    tracer.debug("Called: topic name [%s]." % topic.get_name())
    fd = self.__ouput_html_topic_open_output_file(topic.get_name(), "w")
    self.__output_html_topic_write_header(fd)
    self.__fd_stack.append(fd)
    self.__ul_open_stack.append(False)

def rewrite(reqset): """The constrains value gets a dictionary from the name of the constraints to the object. """ tracer.debug("Called.") reqset.resolve_ce3() tracer.debug("Finished.") return True
def __init__(self, config): '''Creates the output module handler.''' tracer.debug("Called.") self.__config = config self.__cmad_file = None self.__plugin_manager = extension.ExtensionManager( namespace='rmtoo.output.plugin', invoke_on_load=False)
def topic_set_pre(self, _):
    '''This is the first thing which is called.'''
    tracer.debug("Called.")
    self.__output_file = file(self._output_filename, "w")
    self.__output_file.write(
        "digraph reqdeps {\nrankdir=BT;\nmclimit=10.0;\n"
        "nslimit=10.0;ranksep=1;\n")
    tracer.debug("Finished.")

def topic_set_pre(self, _requirement_set):
    '''This is called in the RequirementSet pre-phase.'''
    tracer.debug("Called")
    # Initialize the graph output.
    self.__output_file = file(self._output_filename, "w")
    self.__output_file.write(
        "digraph reqdeps {\nrankdir=BT;\nmclimit=10.0;\n"
        "nslimit=10.0;ranksep=1;\n")

def __read_all_requirements(self, input_handler, commit, input_mods,
                            object_cache):
    '''Read in all the requirements from the input handler.'''
    tracer.debug("Called.")
    fileinfos = input_handler.get_file_infos(commit, "requirements")
    for fileinfo in fileinfos:
        self.__read_one_requirement(fileinfo, input_mods, object_cache)
    tracer.debug("Finished.")

def __read_all_testcases(self, input_handler, commit, input_mods,
                         object_cache):
    '''Read in all the testcases from the input handler.'''
    tracer.debug("Called.")
    fileinfos = input_handler.get_file_infos(commit, "testcases")
    for fileinfo in fileinfos:
        self.__read_one_testcase(fileinfo, input_mods, object_cache)
    tracer.debug("Finished.")

def _extended_directory_check(self, directory):
    '''Checks if all the directories are in the repository.

    The absolute path is computed if the path is relative and then
    compared to the repository base directory.'''
    tracer.debug("called: directory [%s]", directory)
    if self.__repo_base_dir is None:
        self.__setup_repo(directory)
    if not directory.startswith(self.__repo_base_dir):
        raise RMTException(28, "directory [%s] not in repository"
                           % directory)
    return

def restrict_to_topics(self, topic_set):
    '''Restrict the list (dictionary) of requirements to the given
    topic set - i.e. only requirements which belong to one of the
    topics in the topic set are returned.'''
    tracer.debug("Called.")
    restricted_reqs = RequirementSet(self._config)
    for req in self.__requirements.values():
        if req.get_topic() in topic_set:
            restricted_reqs = self.__restrict_to_topics_one_req(
                restricted_reqs, req)
    return restricted_reqs

def get_file_info_with_type(self, commit, file_type, filename):
    '''Returns the FileInfo object for the given filename.'''
    assert commit is None
    tracer.debug("called: file type [%s] filename [%s]"
                 % (file_type, filename))
    for directory in self.__dirs[file_type]:
        tracer.debug("searching in directory [%s]" % directory)
        full_path = os.path.join(directory, filename)
        if os.path.exists(full_path):
            return FileSystem.FileInfo(directory, filename)
    raise RMTException(112, "file [%s] in [%s] base file not found"
                       % (filename, file_type))

def __init_continuum_set(self):
    '''Initialize the continuum: check the configuration for the
    appropriate interval parameters and read in the TopicContinuum.'''
    tracer.debug("Called.")
    # Step through all the available topic sets.
    for ts_name, ts_config in iteritems(self._config.get_value("topics")):
        topic_cont = TopicContinuum(ts_name, self._config, ts_config,
                                    self.__object_cache, self.__input_mods)
        self.__continuum[ts_name] = topic_cont
        self._adapt_usablility(topic_cont)
    tracer.debug("Finished; count [%d]", len(self.__continuum))

def __resolve_solved_by_one_req(self, req):
    '''Resolve the 'Solved by' for one requirement.'''
    tracer.debug("Called: requirement id [%s]", req.get_id())
    # Add the node to the digraph.
    self.add_node(req)
    # It is a 'normal' case when there is no 'Solved by' (until now).
    if "Solved by" not in req.brmo:
        return True
    return self.__resolve_solved_by_one_req_deps(req)

def get(self, object_type, oid):
    '''Tries to retrieve an object with the given id.

    If found, the object is returned; if not found, None is returned.'''
    tracer.debug("called: object type [%s] oid [%s]"
                 % (object_type, oid))
    self.__stats_cnt_get += 1
    if self.__objects.has_key(object_type) \
            and self.__objects[object_type].has_key(oid):
        self.__stats_cnt_get_found += 1
        return self.__objects[object_type][oid]
    return None

def __init__(self, digraph, config, input_handler, commit, file_info,
             req_set):
    tname = file_info.get_filename_sub_part()[:-4]
    # The 'name' in the digraph node is the ID.
    Digraph.Node.__init__(self, tname)
    # This is the name of the topic (short description).
    self.__topic_name = None
    self.__tags = None
    self._config = config
    tracer.debug("Called: name [%s]", tname)
    self.__digraph = digraph
    self.__requirements = None
    self.__read(tname, input_handler, commit, file_info, req_set)

def get_file_infos(self, commit, dir_type):
    '''Return all fileinfos of the given commit and of the given
    directory type.'''
    assert commit is None
    tracer.debug("called: directory type [%s]" % dir_type)
    result = []
    if dir_type not in self.__dirs:
        # Key not available: no files.
        return result
    for directory in self.__dirs[dir_type]:
        result.extend(self.__get_file_infos_from_dir(directory))
    return result

def get_file_info_with_type(self, commit, file_type, filename):
    '''Returns the FileInfo object for the given filename.'''
    tracer.debug("called: commit [%s] file type [%s] filename [%s]",
                 commit, file_type, filename)
    for directory in self.__dirs[file_type]:
        tracer.debug("searching in directory [%s]", directory)
        blob = self.__get_blob(commit, directory, filename)
        if blob is not None:
            dir_split = directory.split("/")
            sub_split = os.path.dirname(filename).split("/")
            return Git.FileInfo(dir_split, sub_split, blob)
    raise RMTException(111, "file [%s] in [%s] base file not found"
                       % (filename, file_type))

def resolve_depends_on(self, also_solved_by):
    '''Step through the internal list of collected requirements and
    evaluate the 'Depends on'. This is done by creating the
    appropriate digraph node.'''
    tracer.debug("Called.")
    # Run through all the requirements and look for the 'Depends on'
    # (depending on the type of the requirement).
    success = True
    for req in self.__requirements.values():
        if not self.__resolve_depends_on_one_req(req, also_solved_by):
            success = False
    tracer.debug("Finished; success [%s]", success)
    return success

def __init__(self, base_dir, sub_dir, blob):
    self.__base_dir = base_dir
    self.__blob = blob
    self.__sub_dir = sub_dir
    self.__base_dirname = os.path.join(*self.__base_dir)
    self.__sub_dirname = ""
    if len(self.__sub_dir) > 0:
        self.__sub_dirname = os.path.join(*self.__sub_dir)
    tracer.debug(self)
    self.__filename = os.path.join(self.__base_dirname,
                                   self.__sub_dirname,
                                   self.__blob.name)

def __init__(self, oconfig): '''Create a graph output object.''' tracer.debug("Called: html ouput module constructed.") self._config = Cfg(oconfig) CreateMakeDependencies.__init__(self) self.__fd_stack = [] self.__topic_name_set = [] # Take care about the openess of the ul. self.__ul_open_stack = [] self.__output_directory = self._config.get_rvalue('output_directory') self.html_header_filename = self._config.get_rvalue('header') self.html_footer_filename = self._config.get_rvalue('footer') self.read_html_arts()
def rewrite(self, reqset):
    tracer.debug("Called.")
    components = connected_components(reqset)
    if components.get_length() == 1:
        # Everything is ok: the graph is connected.
        tracer.debug("Finished.")
        return True
    raise RMTException(
        69, "Requirements graph has two or more connected "
        "components. Please fix the edges between the nodes. "
        "Found components: %s" % components.as_string())

def __setup_repo(self, directory):
    '''Sets up the repository.'''
    tracer.debug("called")
    # Get one sample directory and create the repository from this.
    # Check all if they are in the same repository.
    # Note: because every directory list must contain at least one
    # directory, just use one.
    repo_found = False
    while len(directory) > 1:
        try:
            tracer.debug("using [%s] as sample directory" % directory)
            self.__repo = git.Repo(directory)
            repo_found = True
            break
        except git.exc.NoSuchPathError:
            tracer.debug("Sample directory [%s] does not exist"
                         % directory)
            directory = os.path.dirname(directory)
    if not repo_found:
        assert False
    # [:-5]: cut off the trailing '/.git'.
    self.__repo_base_dir = self.__repo.git_dir[:-5]
    tracer.debug("repository base directory [%s]" % self.__repo_base_dir)

def get_file_infos(self, commit, dir_type):
    '''Return all fileinfos of the given commit and of the given
    directory type.'''
    tracer.debug("called: commit [%s] directory type [%s]"
                 % (commit, dir_type))
    if dir_type not in self.__dirs:
        tracer.debug("Skipping non existent directory for [%s]" % dir_type)
        return []
    result = []
    for directory in self.__dirs[dir_type]:
        result.extend(
            self.__get_file_infos_from_tree(commit.tree, directory))
    return result

def __init__(self, input_mods, config):
    '''Sets up a TopicContinuum for use.'''
    tracer.info("called")
    UsableFlag.__init__(self)
    self.__input_mods = input_mods
    self._config = config
    # This dictionary holds all the TopicSetCollections available in
    # the configured time period.
    self.__continuum = {}
    # Store objects with IDs also in the cache - so that they can be
    # reused.
    self.__object_cache = ObjectCache()
    self.__init_continuum_set()
    self.__object_cache.log_stats()
    tracer.debug("Finished.")

def main_impl(args, mstdout, mstderr):
    tracer.debug("Called.")
    config, mods = MainHelper.main_setup(args, mstdout, mstderr)
    file_system_if = FileSystem(config)
    object_cache = ObjectCache()
    rs = RequirementSet(config)
    command_line_args = config.get_rvalue('general.command_line_arguments')
    rs.read_requirements(file_system_if, None, mods, object_cache)
    return rs.normalize_dependencies() \
        and rs.write_to_filesystem(command_line_args[0])

def execute(self, executor, func_prefix): '''Execute the parts which are needed for TopicsContinuum.''' tracer.debug("Calling pre [%s]", self.name) FuncCall.pcall(executor, func_prefix + "topic_pre", self) tracer.debug("Calling sub [%s]", self.name) for tag in self.__tags: rtag = tag.get_tag() if rtag == "Name": FuncCall.pcall(executor, func_prefix + "topic_name", tag.get_content()) continue if rtag == "SubTopic": subtopic = self.__digraph.find(tag.get_content()) assert subtopic is not None FuncCall.pcall(executor, func_prefix + "topic_sub_pre", subtopic) subtopic.execute(executor, func_prefix) FuncCall.pcall(executor, func_prefix + "topic_sub_post", subtopic) continue if rtag == "IncludeRequirements": self.__requirements.execute(executor, func_prefix) continue if rtag == "Text": FuncCall.pcall(executor, func_prefix + "topic_text", tag.get_content()) continue raise RMTException(114, "Unknown tag in topic [%s]" % rtag, self.name) tracer.debug("Calling post [%s]", self.name) FuncCall.pcall(executor, func_prefix + "topic_post", self) tracer.debug("Finished [%s]", self.name)
def __eval_link(self, req_a, req_b):
    '''Add all the links between all topics of req_a and req_b.'''
    # If either one of the requirements is not in the topic, skip this
    # step.
    if req_a.get_requirement().get_id() not in self.__req2topics \
            or req_b.get_requirement().get_id() not in self.__req2topics:
        tracer.debug("One of the requirements is not in the topic - "
                     "skipping evaluation [%s] [%s]"
                     % (req_a.get_id(), req_b.get_id()))
        return
    for topic_a in self.__req2topics[req_a.get_requirement().get_id()]:
        for topic_b in self.__req2topics[req_b.get_requirement().get_id()]:
            self._add_topic_relation(topic_a, topic_b)

def __init__(self, base_dir, sub_dir, blob):
    Interface.FileInfo.__init__(self)
    self.__base_dir = base_dir
    self.__blob = blob
    self.__sub_dir = sub_dir
    self.__base_dirname = os.path.join(*self.__base_dir)
    self.__sub_dirname = ""
    if self.__sub_dir:
        self.__sub_dirname = os.path.join(*self.__sub_dir)
    tracer.debug(self)
    self.__filename = os.path.join(
        self.__base_dirname, self.__sub_dirname, self.__blob.name)

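# For reference, a tiny standalone example of the path joining done in the
# FileInfo constructors above; the directory parts and the blob name are
# made up for illustration.
import os

base_dir = ["doc", "topics"]      # example base directory parts
sub_dir = []                      # empty sub directory in this example
blob_name = "ReqsDocument.tic"    # example blob name

base_dirname = os.path.join(*base_dir)
sub_dirname = os.path.join(*sub_dir) if sub_dir else ""
print(os.path.join(base_dirname, sub_dirname, blob_name))
# On POSIX systems this prints: doc/topics/ReqsDocument.tic
# (os.path.join() tolerates the empty middle component).
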
def resolve_solved_by(self):
    '''Step through the internal list of collected requirements and
    evaluate the 'Solved by'. This is done by creating the appropriate
    digraph nodes.'''
    tracer.debug("Called.")
    # Run through all the requirements and look for the 'Solved by'.
    success = True
    for req in self.__requirements.values():
        if not self.__resolve_solved_by_one_req(req):
            tracer.info("Handling of requirement [%s] was not successful",
                        req.get_id())
            success = False
    tracer.debug("Finished; success [%s]", success)
    return success

def __read_topics(self):
    '''Read in the topics for this topic set.

    Topics are also handled by the object cache. Note that the
    algorithm differs fundamentally from the one used to read in the
    requirements: this one knows the base topic and therefore all
    dependent sub-topics - the algorithm reading in the requirements
    just takes all the available files.'''
    tracer.debug("Called.")
    topic_base = self.__input_handler.get_topic_base_file_info(
        self.__commit)
    tracer.debug("Topic base [%s]." % topic_base)
    return Topic(self, self._config, self.__input_handler, self.__commit,
                 topic_base, self.__complete_requirement_set)

def __init__(self, config):
    '''Constructs a RequirementSet.

    This does not read everything in: please use the appropriate
    methods to do so.'''
    tracer.debug("Called.")
    Digraph.__init__(self)
    UsableFlag.__init__(self)
    self._config = config
    self.__master_nodes = None
    # The key is the id, the value the constraint.
    self.__constraints = {}
    # This holds only ready-to-use CE3 objects.
    self.__ce3set = CE3Set()
    # All the test cases for this requirement set.
    self.__testcases = {}
    tracer.debug("Finished.")

def execute(self, executor, func_prefix):
    '''Execute the parts which are needed for the TopicSet.'''
    tracer.debug("Calling pre.")
    FuncCall.pcall(executor, func_prefix + "topic_set_pre", self)
    tracer.debug("Calling sub topic.")
    self.__topic_set.execute(executor, func_prefix)
    tracer.debug("Calling post.")
    FuncCall.pcall(executor, func_prefix + "topic_set_post", self)
    tracer.debug("Finished.")

def rewrite(reqset): """The rewrite method checks if there is only one connected component. If not an error is printed including all the found components. """ tracer.debug("Called.") components = connected_components(reqset) if components.get_length() == 1: # Everything is ok: graph is connected tracer.debug("Finished.") return True raise RMTException( 69, "Requirements graph has two or more connected " "components. Please fix the edges between the nodes." "Found components: %s" % components.as_string())
def add(self, oid, object_type, obj):
    '''Adds the given object to the cache using the given object id.

    Checks if the object is of the correct type and if the object is
    already in the cache.'''
    tracer.debug("adding object with object type [%s] oid [%s]"
                 % (object_type, oid))
    if not self.__objects.has_key(object_type):
        self.__stats_cnt_object_types += 1
        self.__objects[object_type] = {}
    if oid in self.__objects[object_type]:
        assert False
        raise RMTException(106, "object with oid [%s] already in cache."
                           % oid)
    self.__stats_cnt_objects += 1
    self.__objects[object_type][oid] = obj

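# A short usage sketch of the get()/add() pair shown above.  It assumes an
# ObjectCache instance as constructed elsewhere in this listing
# (ObjectCache()); the id and the cached payload are made-up placeholders.
object_cache = ObjectCache()
vcs_id = "deadbeef"               # placeholder version control id
payload = {"Name": "Example"}     # placeholder object to cache

cached = object_cache.get("Requirement", vcs_id)   # cache miss -> None
if cached is None:
    object_cache.add(vcs_id, "Requirement", payload)

assert object_cache.get("Requirement", vcs_id) is payload   # cache hit
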
def main_func(args, mstdout, mstderr):
    """The 'real' main function.

    Sets up everything, reads in the requirements and writes out
    everything.
    """
    tracer.debug("Called.")
    config, mods = MainHelper.main_setup(args, mstdout, mstderr)
    file_system_if = FileSystem(config)
    object_cache = ObjectCache()
    req_set = RequirementSet(config)
    command_line_args = config.get_rvalue('general.command_line_arguments')
    req_set.read_requirements(file_system_if, None, mods, object_cache)
    return req_set.normalize_dependencies() \
        and req_set.write_to_filesystem(command_line_args[0])

def __setup_directories(self, cfg):
    '''Cleans up and unifies the directories.'''
    tracer.debug("Called.")
    for dir_type in ["requirements", "topics", "constraints", "testcases"]:
        dirs = cfg.get_rvalue_default(dir_type + "_dirs", None)
        if dirs is None:
            tracer.info("Directory [%s] not configured - skipping.",
                        dir_type)
            continue
        self._check_list_of_strings(dir_type, dirs)
        new_directories = []
        for directory in dirs:
            new_directories.append(directory)
        self.__dirs[dir_type] = new_directories
    for dir_type, directory in self.__dirs.iteritems():
        tracer.debug("[%s] directories [%s]" % (dir_type, directory))

def execute(self, executor, func_prefix):
    '''Execute the parts which are needed for RequirementSet.'''
    tracer.debug("calling pre")
    FuncCall.pcall(executor, func_prefix + "requirement_set_pre", self)
    tracer.debug("calling sub requirement set")
    for requirement in executor.requirement_set_sort(
            self.__requirements.values()):
        requirement.execute(executor, func_prefix)
    tracer.debug("calling post")
    FuncCall.pcall(executor, func_prefix + "requirement_set_post", self)
    tracer.debug("finished")

def execute(self, executor, func_prefix): '''Execute the parts which are needed for TopicsContinuum.''' tracer.debug("Calling pre [%s]", self.__name) FuncCall.pcall(executor, func_prefix + "topic_continuum_pre", self) tracer.debug("Calling sub [%s]", self.__name) for topic_set in executor.topic_continuum_sort(self.__vcs_commit_ids, self.__topic_sets): topic_set.execute(executor, func_prefix) tracer.debug("Calling post [%s]", self.__name) FuncCall.pcall(executor, func_prefix + "topic_continuum_post", self) tracer.debug("Finished [%s]", self.__name)
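# The execute() methods above drive callbacks on an 'executor' object whose
# method names are built from func_prefix plus a phase name (for example
# "topic_set_pre", "topic_pre", "topic_text", "topic_post").  The sketch
# below is a hypothetical minimal executor for an empty func_prefix; it is
# not rmtoo code.  How FuncCall.pcall() treats callbacks that an executor
# does not provide is not shown in this listing, so the set of methods here
# is only an assumption based on the phase names visible above.
class MinimalExecutor(object):

    def topic_set_pre(self, topic_set):
        print("start topic set")

    def topic_pre(self, topic):
        print("start topic [%s]" % topic.get_name())

    def topic_text(self, text):
        print(text)

    def topic_post(self, topic):
        print("end topic [%s]" % topic.get_name())

    def topic_set_post(self, topic_set):
        print("end topic set")

    def requirement_set_sort(self, requirements):
        # Order in which the requirements of a set are visited.
        return list(requirements)

    def topic_continuum_sort(self, vcs_commit_ids, topic_sets):
        # Which topic sets of a continuum are visited, and in which order;
        # here simply all of them (the real selection criteria are not
        # shown in this listing).
        return list(topic_sets.values())
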