def __init__(self, tbhtags, content, rid, mods, config, type_str, file_path):
    """Set up the base object; when content is given, parse it right away."""
    UsableFlag.__init__(self)
    # Identifiers must already be unicode strings.
    Encoding.check_unicode(rid)
    Encoding.check_unicode(type_str)
    # Name of the tags which will be handled by the module input.
    self.tbhtags = tbhtags
    # Original tags: kept here when no conversion to specific
    # values is needed.
    self.otags = {}
    # The converted values.
    self.values = {}
    self._id = rid
    self.mods = mods
    self.config = config
    self.type_str = type_str
    self._file_path = file_path
    self.record = None
    self.brmo = None
    # The analytic modules store their results in this map.
    self.analytics = {}
    if content is not None:
        self.__input(content)
def add_deprecated_values(options):
    '''Collect all values that were specified with the help of the old
    and deprecated command line options into a dictionary.'''
    result = {}
    if options.config_file is not None:
        result['configuration'] = {
            'deprecated': {
                'config_file': Encoding.to_unicode(options.config_file)}}
    if options.modules_directory is not None:
        module_dirs = [Encoding.to_unicode(options.modules_directory)]
    else:
        # No modules directory given: fall back to the pyshared one.
        module_dirs = [Encoding.to_unicode(
            distutils.sysconfig.get_python_lib())]
    result['global'] = {'modules': {'directories': module_dirs}}
    if options.create_makefile_dependencies is not None:
        result['actions'] = {
            'create_makefile_dependencies':
            Encoding.to_unicode(options.create_makefile_dependencies)}
    return result
def from_string(cls, in_str, rid, tioconfig):
    '''Build a TxtRecord by parsing the given string; rid is the
    Requirement ID.'''
    Encoding.check_unicode(in_str)
    record = cls(tioconfig)
    record.parse(in_str, rid)
    return record
def __read(self, tname, input_handler, commit, file_info, req_set):
    '''Read in the topic and create all the tags.'''
    Encoding.check_unicode(tname)
    self.__tags = TxtRecord.from_string(file_info.get_content(), tname,
                                        input_handler.get_txt_io_config())
    for tag in self.__tags:
        tag_name = tag.get_tag()
        if tag_name == "SubTopic":
            # Recursively pull in every subtopic as well.
            lfile_info = input_handler.get_file_info_with_type(
                commit, "topics", tag.get_content() + ".tic")
            subtopic = Topic(self.__digraph, self._config, input_handler,
                            commit, lfile_info, req_set)
            self.__digraph.add_node(subtopic)
            Digraph.create_edge(self, subtopic)
        elif tag_name == "Name":
            # There can (currently) be only one name.
            assert self.__topic_name is None
            self.__topic_name = tag.get_content()
        elif tag_name == "IncludeRequirements":
            if tag.get_content() != "full":
                raise RMTException(
                    113, "IncludeRequirements value not "
                    "supported [%s]" % tag.get_content(), self.name)
            self.__requirements = req_set.restrict_to_topics(tname)
            tracer.debug("Found [%d] requirements for topic [%s]",
                         self.__requirements.get_requirements_cnt(), tname)
    # The 'Name' tag is mandatory.
    if self.__topic_name is None:
        raise RMTException(62, "Mandatory tag 'Name' not given in topic",
                           self.name)
def __read(self, tname, input_handler, commit, file_info, req_set):
    '''Read in the topic and create all the tags.'''
    Encoding.check_unicode(tname)
    txt_io_config = input_handler.get_txt_io_config()
    self.__tags = TxtRecord.from_string(
        file_info.get_content(), tname, txt_io_config)
    for tag in self.__tags:
        current = tag.get_tag()
        if current == "SubTopic":
            # Subtopics are read in recursively and linked into
            # the digraph.
            sub_file_info = input_handler.get_file_info_with_type(
                commit, "topics", tag.get_content() + ".tic")
            new_topic = Topic(self.__digraph, self._config, input_handler,
                              commit, sub_file_info, req_set)
            self.__digraph.add_node(new_topic)
            Digraph.create_edge(self, new_topic)
        elif current == "Name":
            # Only one single name is (currently) allowed.
            assert self.__topic_name is None
            self.__topic_name = tag.get_content()
        elif current == "IncludeRequirements":
            if tag.get_content() != "full":
                raise RMTException(113, "IncludeRequirements value not "
                                   "supported [%s]" % tag.get_content(),
                                   self.name)
            self.__requirements = req_set.restrict_to_topics(tname)
            tracer.debug("Found [%d] requirements for topic [%s]",
                         self.__requirements.get_requirements_cnt(), tname)
    # The tag 'Name' must always be present.
    if self.__topic_name is None:
        raise RMTException(62, "Mandatory tag 'Name' not given in topic",
                           self.name)
def __init__(self, content, rid, file_path, mods, config):
    """Create a requirement object from its textual content.

    Both the content and the requirement id must be unicode.
    """
    Encoding.check_unicode(content)
    Encoding.check_unicode(rid)
    Digraph.Node.__init__(self, rid)
    BaseRMObject.__init__(self, InputModuleTypes.reqtag, content, rid,
                          mods, config, u"requirements", file_path)
def dollar_replace(self, value):
    '''Replaces all occurrences of ${} for different types.'''
    if isinstance(value, list):
        return self.__dollar_replace_list(value)
    if Encoding.is_unicode(value):
        return self.__dollar_replace_string(value)
    # Never reached: unknown type
    print("Cfg never reached [%s] [%s]" % (type(value), value))
    assert False
def __input(self, content):
    '''Read it in from the file (Syntactic input).'''
    Encoding.check_unicode(content)
    txtio = TxtIOConfig(self.config, self.type_str)
    self.record = TxtRecord.from_string(content, self._id, txtio)
    # get_dict() either returns a valid dictionary or raises an
    # exception - no need to check the result here.
    brmo = self.record.get_dict()
    # Semantic input: let all the modules process the tags.
    self.handle_modules_tag(brmo)
    # Tags working on the whole requirement set (e.g. 'Solved by')
    # are deliberately left over; keep the remainder for later
    # inspection.
    self.brmo = brmo
def rmttest_positive_02(self):
    "Requirement Tag Effort Estimation - tag given with all valid numbers"
    config, req = create_parameters()
    for valid in ReqEffortEst.valid_values:
        req["Effort estimation"] = RecordEntry(
            "Effort estimation", Encoding.to_unicode(valid))
        tag_handler = ReqEffortEst(config)
        name, value = tag_handler.rewrite("EffortEstimation-test", req)
        assert "Effort estimation" == name
        assert valid == value
def parse(self, record, rid):
    """Parse everything from a string"""
    Encoding.check_unicode(record)
    # Work on the record line by line.
    lines = record.split("\n")
    self.check_line_length(lines, rid)
    self.maybe_remove_last_empty_line(lines)
    self.comment_raw = TxtParser.extract_record_comment(lines)
    for raw_comment in self.comment_raw:
        Encoding.check_unicode(raw_comment)
    self.set_comment(TxtParser.extract_comment(self.comment_raw))
    Encoding.check_unicode(self.get_comment())
    success, parsed_record = TxtParser.split_entries(
        lines, rid, self, len(self.comment_raw) + 1)
    if not success:
        # The split itself already failed - stop processing here.
        self._set_not_usable()
        return
    for entry in parsed_record:
        self.append(TxtRecordEntry(entry))
def add_values(soptions, name):
    '''Add all the new command line parameter values.'''
    if soptions is None:
        return {}
    prefix = name + ":"
    converted = []
    for raw_opt in soptions:
        uopt = Encoding.to_unicode(raw_opt)
        # Options without an explicit scheme get the name prefix.
        if not (uopt.startswith("file://") or uopt.startswith(prefix)):
            uopt = prefix + uopt
        converted.append(uopt)
    return {'configuration': {name: converted}}
def cfg_key(key):
    """Configuration key handling.

    A string key is converted to the internally used list of strings
    by splitting it at '.'; a list key is passed through unchanged.
    """
    if isinstance(key, list):
        # Already in the internal representation.
        return key
    if Encoding.is_unicode(key):
        return key.split('.')
    # Neither a list nor a string: programming error.
    print("Invalid key type [%s]" % type(key))
    assert False
def rmttest_negative_01(self):
    "Requirement Tag Effort Estimation - tag given with invalid numbers"
    config, req = create_parameters()
    invalid_numbers = [4, 6, 7, 9, 10, 11, 12, 14, 15, 16, 17, 18, 19,
                       20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32]
    for number in invalid_numbers:
        req["Effort estimation"] = RecordEntry(
            "Effort estimation", Encoding.to_unicode(number))
        tag_handler = ReqEffortEst(config)
        with pytest.raises(RMTException) as rmte:
            tag_handler.rewrite("EffortEstimation-test", req)
        assert 4 == rmte.id()
def __init__(self, tag, content, comment=None):
    """Store tag, content and an optional comment - all unicode."""
    Encoding.check_unicode(tag)
    Encoding.check_unicode(content)
    if comment is not None:
        Encoding.check_unicode(comment)
    self.__tag = tag
    self.__content = content
    self.__comment = comment
def _check_list_of_strings(name, tbc):
    '''Checks if the given variable is a list of strings or None.

    :param name: configuration name (used in the error messages)
    :param tbc: the value to be checked
    :raises RMTException: 103 when not a list, 105 when empty,
        104 when an element is not a string
    '''
    if tbc is None:
        tracer.debug("Ignoring non existent configuration for [%s]", tbc)
        return
    if not isinstance(tbc, list):
        # Bug fix: a stray 'assert False' used to precede this raise,
        # making the RMTException unreachable (an AssertionError was
        # thrown instead and the documented error code 103 never
        # reached callers).
        raise RMTException(103, "Configuration error: [%s] configuration "
                           "must be a list, is [%s]" % (name, type(tbc)))
    if not tbc:
        raise RMTException(105, "Configuration error: [%s] configuration "
                           "must be a non empty list" % name)
    for string in tbc:
        if not Encoding.is_unicode(string):
            raise RMTException(104, "Configuration error: [%s].[%s] "
                               " configuration must be a string"
                               % (name, string))
def execute_cmds(config, input_mods, _mstdout, mstderr):
    '''Checks are always done - to be sure that e.g. the dependencies
    are correct. Please note: there is no 'ONE' latest continuum any
    more - but a list.

    Returns False as soon as a fatal problem is detected, True
    otherwise.
    '''
    # Bug fix: this docstring used to appear *after* the following
    # statement and was therefore a no-op string expression instead of
    # the function's documentation.
    # Import foreign data first.
    Import.execute(config)
    try:
        topic_continuum_set = TopicContinuumSet(input_mods, config)
    except RMTException as rmte:
        mstderr.write("+++ ERROR: Problem reading in the continuum [%s]\n"
                      % Encoding.to_unicode(rmte))
        return False
    # If there is a problem with the last requirement set included in
    # the requirements continuum, stop processing.  (Note the logs
    # were already written out.)
    if not topic_continuum_set.is_usable():
        mstderr.write("+++ ERROR: topic continuum set is not usable.\n")
        return False
    # When only the dependencies are needed, output them to the given
    # file and do nothing else.
    cmad_filename = config.get_value_wo_throw(
        'actions.create_makefile_dependencies')
    if cmad_filename is not None:
        Output.execute(config, topic_continuum_set, mstderr, "cmad_")
        return True
    # The requirements are syntactically correct now: therefore it is
    # possible to do some analytics on them.
    # Note that analytics are only run on the latest version.
    if not Analytics.execute(config, topic_continuum_set, mstderr):
        if config.get_bool('processing.analytics.stop_on_errors', True):
            return False
    # Output everything.
    Output.execute(config, topic_continuum_set, mstderr, "")
    return True
def _check_list_of_strings(name, tbc):
    '''Checks if the given variable is a list of strings or None.

    :param name: configuration name (used in the error messages)
    :param tbc: the value to be checked
    :raises RMTException: 103 when not a list, 105 when empty,
        104 when an element is not a string
    '''
    if tbc is None:
        tracer.debug("Ignoring non existent configuration for [%s]", tbc)
        return
    if not isinstance(tbc, list):
        # Bug fix: removed a leftover 'assert False' that stood before
        # this raise - it made the RMTException with error code 103
        # unreachable because an AssertionError fired first.
        raise RMTException(
            103, "Configuration error: [%s] configuration "
            "must be a list, is [%s]" % (name, type(tbc)))
    if not tbc:
        raise RMTException(
            105, "Configuration error: [%s] configuration "
            "must be a non empty list" % name)
    for string in tbc:
        if not Encoding.is_unicode(string):
            raise RMTException(
                104, "Configuration error: [%s].[%s] "
                " configuration must be a string" % (name, string))
def execute_cmds(config, input_mods, _mstdout, mstderr):
    '''Checks are always done - to be sure that e.g. the dependencies
    are correct. Please note: there is no 'ONE' latest continuum any
    more - but a list.'''
    try:
        topic_continuum_set = TopicContinuumSet(input_mods, config)
    except RMTException as rmte:
        mstderr.write("+++ ERROR: Problem reading in the continuum [%s]\n"
                      % Encoding.to_unicode(rmte))
        return False
    if not topic_continuum_set.is_usable():
        # A broken last requirement set stops the processing; the
        # corresponding logs were already written out.
        mstderr.write("+++ ERROR: topic continuum set is not usable.\n")
        return False
    cmad_filename = config.get_value_wo_throw(
        'actions.create_makefile_dependencies')
    if cmad_filename is not None:
        # Only the makefile dependencies are wanted - write them out
        # and finish.
        Output.execute(config, topic_continuum_set, mstderr, "cmad_")
        return True
    # The requirements are syntactically correct at this point, so
    # analytics can run - on the latest version only.
    if not Analytics.execute(config, topic_continuum_set, mstderr) \
            and config.get_bool('processing.analytics.stop_on_errors', True):
        return False
    # Output everything.
    Output.execute(config, topic_continuum_set, mstderr, "")
    return True
def __init__(self, se):
    '''There must be three entries:
    1) initial line with tag
    2) possible empty list of continue lines (starting with space)
    3) possible empty list of comment and / or empty lines.
    '''
    assert len(se) == 3
    initial_line, continue_lines, comment_lines = se
    Encoding.check_unicode(initial_line)
    self.tag_raw = initial_line
    Encoding.check_unicode_list(continue_lines)
    self.content_raw = continue_lines
    Encoding.check_unicode_list(comment_lines)
    self.comment_raw = comment_lines
    # Drop the last character of the raw tag line to get the tag.
    tag = initial_line[0:-1]
    value = "".join(continue_lines)
    comment = TxtParser.extract_comment(comment_lines)
    RecordEntry.__init__(self, tag, value, comment)
def custom_str_constructor(loader, node):
    """YAML constructor ensuring that every configuration scalar is
    read in as unicode.
    """
    scalar = loader.construct_scalar(node)
    return Encoding.to_unicode(scalar)
def __init__(self, name, tags, brmo=None):
    """Create the node; 'name' must be a unicode requirement id."""
    Digraph.Node.__init__(self, name)
    Encoding.check_unicode(name)
    # The requirement id doubles as the node id.
    self.id = name
    self.otags = tags
    self.brmo = brmo
# (c) 2018 Kristoffer Nordstroem, see COPYING import os import pytest import distutils.file_util from rmtoo.lib.Import import Import from rmtoo.imports.xls import XlsImport from Utils import create_tmp_dir, delete_tmp_dir from rmtoo.lib.Encoding import Encoding LDIR = Encoding.to_unicode(os.path.dirname(os.path.abspath(__file__))) @pytest.fixture(scope='function') def tmpdir(): tmpdir = create_tmp_dir() yield tmpdir delete_tmp_dir(tmpdir) @pytest.fixture def def_cfg(tmpdir): def_cfg_imp_dest = { 'topics': { 'ts_common': { 'sources': [[ 'dummydriver', { 'requirements_dirs': [tmpdir], 'topics_dirs': [tmpdir] } ]]
def rmttest_invalid_config_parser(self):
    '''Just figure out where it blows up'''
    # NOTE(review): 'tmpdir' is not a parameter of this method, so it
    # resolves to a module-level name - presumably the pytest fixture
    # *function*, not a directory path.  This looks like a missing
    # 'tmpdir' argument; verify against the fixtures in this module.
    dest_dirs = {u'requirements_dirs': [Encoding.to_unicode(tmpdir)],
                 u'topics_dirs': [Encoding.to_unicode(tmpdir)]}
    importer = XlsImport({}, dest_dirs)
    # The test expects the importer to be unusable for this input.
    assert not importer.useable
def dest_dir(tmpdir):
    """Return the import destination directories below *tmpdir*."""
    return {u'requirements_dirs': [Encoding.to_unicode(tmpdir)],
            u'topics_dirs': [Encoding.to_unicode(tmpdir)]}
def main():
    """The main function for the pricing graph.

    Reads the CSV export and writes a graphviz 'dot' file with one
    node per requirement, labelled with its overall and local costs.
    """
    csvfilename, graphfilename = parse_argv()
    # The files must be saved according to these rules:
    #  - delimiter must be a ','
    #  - quotechar must be a '"'
    # Bug fix: the csv module needs the input opened in *text* mode
    # with newline='' - the old 'rb' mode fails under Python 3.  Both
    # files are now handled by context managers so they are closed
    # even when an error occurs (the csv file was never closed at all
    # before).
    with open(csvfilename, "r", newline='') as csv_fd, \
            open(graphfilename, "w") as graph_fd:
        csvr = csv.reader(csv_fd, delimiter=',', quotechar='"')
        # Write out the graph header.
        graph_fd.write("digraph reqdeps {\nrankdir=BT;\nmclimit=10.0;\n"
                       "nslimit=10.0;ranksep=1;\n")
        # Read in all the rows and store them: there is the need to run
        # multiple times and in different directions through this list.
        rows = list(csvr)
        # This holds the dependent costs of a requirement. If there is
        # no entry in this dictionary, there are no dependent costs.
        dep_costs = {}
        # Because the nodes are topologically sorted, start at the end
        # and go until the beginning is reached.
        rows.reverse()
        for row in rows:
            # Colorize graph node depending on the state in column 1.
            nodeparams = []
            if row[1] == 'none':
                nodeparams.append("color=red")
            elif row[1] == 'partial':
                nodeparams.append("color=orange")
            elif row[1] == 'fully':
                nodeparams.append("color=green")
            # Compute local costs (lcosts).
            # Sometimes a ',' is used to separate thousands.
            dayrate = float(
                Encoding.to_unicode(row[2])[:-2].replace(",", ""))
            days = float(row[3])
            material = float(
                Encoding.to_unicode(row[4])[:-2].replace(",", ""))
            lcosts = dayrate * days + material
            # Check if there are dependent costs (dcosts).
            dcosts = dep_costs.get(row[0], 0.0)
            # Compute the overall costs.
            ocosts = lcosts + dcosts
            # Write out node (attributes).
            nodeparams.append('label="%s\\n%9.2f\\n%9.2f"'
                              % (row[0], ocosts, lcosts))
            graph_fd.write("%s [%s];\n" % (row[0], ",".join(nodeparams)))
            # Add the current costs to the (possibly existing) dep_costs.
            dep_costs[row[5]] = dep_costs.get(row[5], 0.0) + ocosts
        # Output all the existing edges.
        for row in rows:
            if row[5] != '0':
                graph_fd.write("%s -> %s;\n" % (row[0], row[5]))
        graph_fd.write("}")
def set_comment(self, comment):
    """Set the comment; it must be a unicode string."""
    Encoding.check_unicode(comment)
    self.__comment = comment