def appendSupplementToMarkdownOverview(self, markdownFilePath):
    """
    Append .md files from supplements folder (searches all files recursively within the supplement folder)
    :param markdownFilePath: The path of the Markdown file to which the data will be appended to
    :return: nothing
    """
    supplementDirPath = Emma.shared_libs.emma_helper.joinPath(self.projectPath, SUPPLEMENT)
    if os.path.isdir(supplementDirPath):
        # Collect every file below the supplement folder (recursively)
        supplementFiles = []
        for supplementRootPath, _, filesInSupplementDir in os.walk(supplementDirPath):
            for aSupplementFile in filesInSupplementDir:
                supplementFiles.append(Emma.shared_libs.emma_helper.joinPath(supplementRootPath, aSupplementFile))
        with open(markdownFilePath, 'a') as markdown:
            for supplementFile in supplementFiles:
                try:
                    with open(supplementFile, "r") as supplement:
                        markdown.write(supplement.read())
                except FileNotFoundError:
                    # This case should hardly appear since the files were found milliseconds before
                    sc().error(f"The file `{os.path.abspath(supplementFile)}` was not found!")
    else:
        # FIX: was sc().wwarning (typo) which would raise AttributeError at runtime
        sc().warning(f"A supplement folder does not exist in {self.projectPath}. No supplement files will be attached to the report")
def __readGlobalConfigJson(path):
    """
    Function to read in and process the globalConfig.
    :param path: Path of the globalConfig file.
    :return: The content of the globalConfig.
    """
    # Load the globalConfig file
    globalConfig = Emma.shared_libs.emma_helper.readJson(path)

    # Iterate over a snapshot of the keys so that ignored entries can be removed while looping
    for configId in list(globalConfig.keys()):
        configEntry = globalConfig[configId]
        # Skip configID if ["ignoreConfigID"] is True
        if IGNORE_CONFIG_ID in configEntry:
            flagValue = configEntry[IGNORE_CONFIG_ID]
            if not isinstance(flagValue, bool):
                # The flag must be a real JSON boolean, not e.g. a string
                sc().error("The " + IGNORE_CONFIG_ID + " of " + configId + " has a type " + str(type(flagValue)) + " instead of bool. " + "Please be sure to use correct JSON syntax: boolean constants are written true and false.")
            elif flagValue is True:
                globalConfig.pop(configId)

    # Check whether the globalConfig is empty
    if not globalConfig:
        sc().warning("No configID was defined or all of them were ignored.")
    return globalConfig
def main(arguments):
    """
    Main function of the script.
    :param arguments: Dictionary that contains the arguments that influence the execution.
                      Currently available arguments:
                          - verbose : Extra info will be printed during execution.
    :return: None
    """
    # Store original path variable so it can be restored afterwards
    pathOldValue = os.environ["PATH"]
    if "Graphviz" not in os.environ["PATH"]:
        graphvizBinAbspath = os.path.abspath(arguments.graphviz_bin_folder)
        # Add to path
        # FIX: insert a separator BEFORE the new entry (previously the folder was
        # concatenated directly onto the last PATH element whenever PATH had no
        # trailing separator) and use os.pathsep instead of the Windows-only ";"
        os.environ["PATH"] += os.pathsep + graphvizBinAbspath
    try:
        os.chdir("..")  # Only do this here (not within generateCallGraph() in order to avoid race conditions)
        generateCallGraph(EMMA_PROFILE_FILE_PATH, EMMA_EXECUTION_STRING, arguments.verbose)
        generateCallGraph(EMMA_VIS_PROFILE_FILE_PATH, EMMA_VIS_EXECUTION_STRING, arguments.verbose)
    except Exception as exception:  # pylint: disable=broad-except
        # Rationale: We are not trying to catch a specific exception type here.
        # The purpose of this is, that the PATH environment variable will be set back in case of an error.
        sc().error("An exception was caught:", exception)
    finally:
        # FIX: restore in a finally block so PATH and the working directory are
        # reset even if the error handler itself raises (e.g. via sys.exit)
        os.environ["PATH"] = pathOldValue
        os.chdir("genDoc")  # Change working directory back
def appendModuleConsumptionToMarkdownOverview(self, markdownFilePath):
    """
    Appends consumptionByCategorisedModules and the corresponding plot to the Markdown file
    :param markdownFilePath: The path of the Markdown file to which the data will be appended to.
    :return: nothing
    """
    sc().info("Appending object summary to overview...")
    # Re-write .png to ensure up-to-date overview
    self.plotByCategorisedModules(plotShow=False)
    moduleTable = self.consumptionByCategorisedModules.to_string().replace("\n", "\n ")
    picturePath = os.path.join(self.project + MEMORY_ESTIMATION_PARTITION_OF_ALLOCATED_MEMORY_PICTURE_NAME_FIX_PART + self.statsTimestamp + "." + MEMORY_ESTIMATION_PICTURE_FILE_EXTENSION)
    with open(markdownFilePath, "a") as markdown:
        markdown.write("\n# Percentage share of modules\n")
        markdown.write(" \n " + moduleTable + "\n")
        markdown.write("\n\n*percentage share: share of the used memory*\n\n")
        # FIXME: Deactivated; colours of legend in figure not correct - possibly this figure is not even needed/useful (MSc)
        # markdown.write("<div align=\"center\"> <img src=\"" + os.path.join(self.project + MEMORY_ESTIMATION_BY_MODULES_PICTURE_NAME_FIX_PART + self.statsTimestamp + "." + MEMORY_ESTIMATION_PICTURE_FILE_EXTENSION) + "\" width=\"1000\"> </div>")
        # markdown.write("\n\n")
        markdown.write("<div align=\"center\"> <img src=\"" + picturePath + "\" width=\"1000\"> </div>")
        markdown.write("\n")
def checkIfFileExists(filePath):
    """
    Check whether a file exists; If not exit with error message
    :param filePath: File path to check
    """
    if os.path.exists(filePath):
        return
    sc().error("Given file (" + filePath + ") does not exist; exiting...")
def tabulariseAndSortMonolithContent(monolithContent):
    """
    Parses the monolith file and returns a "table" (addresses are int's) of the following structure:
    table[n-th_entry][0] = virtual(int), ...[1] = physical(int), ...[2] = offset(int), ...[3] = size(int), ...[4] = section(str)
    Offset = physical - virtual
    :param monolithContent: Content from monolith as text (all lines)
    :return: list of lists
    """
    table = []  # "headers": virtual, physical, size, section
    monolithPattern = Emma.emma_libs.ghsMapfileRegexes.UpperMonolithPattern()
    # FIX: re-use the pattern object created above instead of constructing a new
    # UpperMonolithPattern on every line (loop-invariant work hoisted out of the loop)
    pattern = monolithPattern.pattern
    groups = monolithPattern.Groups
    for line in monolithContent:
        match = re.search(pattern, line)
        if match:
            virtual = int(match.group(groups.virtualAdress), 16)
            physical = int(match.group(groups.physicalAdress), 16)
            table.append([
                virtual,
                physical,
                physical - virtual,  # offset
                int(match.group(groups.size), 16),
                match.group(groups.section)
            ])
    if not table:
        sc().error("No entry in the monolith file was found! Please check the content of the monolith file and/or the regex'es.")
    return table
def createMarkdownOverview(self):
    """ Creates the [PROJECT] overview md """
    self.plotByMemType(plotShow=False)  # Re-write .png to ensure up-to-date overview

    markdownFilePath = Emma.shared_libs.emma_helper.joinPath(
        self.resultsPath,
        self.project + "-Memory_Overview_" + self.statsTimestamp.replace(" ", "") + ".md")
    overviewPicturePath = Emma.shared_libs.emma_helper.joinPath(
        self.project + MEMORY_ESTIMATION_BY_PERCENTAGES_PICTURE_NAME_FIX_PART + self.statsTimestamp + "." + MEMORY_ESTIMATION_PICTURE_FILE_EXTENSION)

    # Assemble the whole document first, then write it out in one go
    overviewParts = [
        "Memory Estimation Overview - " + self.project + "\n==========================\n\n",
        "<div align=\"center\"> <img src=\"" + overviewPicturePath + "\" width=\"1000\"> </div>",
        "\n",
        "\n# Usage by Memory Type\n",
        " \n " + self.consumptionByMemType.to_string().replace("\n", "\n ") + "\n",
        "\n\n*" + SIZE_DEC + ": Used Memory in Byte* | *" + BUDGET + ": Total Memory Size* | *" + USED_PERCENT + ": Used Memory in %* | *" + AVAILABLE_PERCENT + ": Available Memory in %*\n\n",
        "\n# Usage by Mapfile\n",
        " \n " + self.consumptionByMemTypePerMap.to_string().replace("\n", "\n ") + "\n",
        "\n\n*" + SIZE_DEC + ": Used Memory in Byte*\n\n",
    ]
    try:
        with open(markdownFilePath, 'w') as markdown:
            markdown.write("".join(overviewParts))
    except FileNotFoundError:
        sc().error(f"The file `{os.path.abspath(markdownFilePath)}` was not found!")
    return markdownFilePath
def __printSelectedFiles(paths: typing.List[str]) -> None:
    """Log the selected files as '<version> - <filename>' pairs."""
    sc().info("Selected files:")
    for path in paths:
        # The version is the name of the grandparent directory of the file
        directory, file = os.path.split(path)
        version: str = os.path.split(os.path.split(directory)[0])[1]
        sc().info(" " + version + " - " + file)
def __printSelectedFiles(self, paths: typing.List[str]) -> None:
    """
    Print the selected files as '<version> - <filename>' pairs.
    :param paths: Absolute paths of the selected files.
    :return: None
    """
    sc().info("Selected files:")
    # FIX: dropped the unused enumerate() index and log via sc().info for
    # consistency with the module's other __printSelectedFiles implementation
    for path in paths:
        pathSplit: typing.List[str] = os.path.split(path)
        # The version is the name of the grandparent directory of the file
        version: str = os.path.split(os.path.split(pathSplit[0])[0])[1]
        file: str = pathSplit[1]
        sc().info(" " + version + " - " + file)
def mkDirIfNeeded(path):
    """
    Creates path and all intermediate directories until there
    :param path: Path to create
    """
    if os.path.isdir(path):
        return
    os.makedirs(path)
    sc().info("Directory " + path + " created since not present")
def checkIfFolderExists(folderName):
    """
    Check whether a folder exists in current directory; If not exit with error message
    :param folderName: Project to check
    """
    if os.path.isdir(folderName):
        return
    sc().error("Given directory (" + os.path.abspath(folderName) + ") does not exist; exiting...")
def setUp(self):
    """
    Prepare the test case: init the base test data and configure the logger.
    :return: None
    """
    TestData.__init__(self)
    # Setting up the logger
    # This syntax will default init it and then change the settings with the __call__()
    # This is needed so that the unit tests can have different settings and not interfere with each other
    # FIX: pass the verbosity by keyword (invVerbosity=4) for consistency with the
    # other setUp()/init() implementations in this code base that use the keyword form
    sc()(invVerbosity=4, actionWarning=None, actionError=self.actionError)
    # Flags the action callbacks set, so the tests can verify whether they fired
    self.actionWarningWasCalled = False
    self.actionErrorWasCalled = False
def setAddressesGivenLength(self, addressLength):
    """
    Function to set the address length value from an address length value.
    :param addressLength: Non-negative length to store.
    :return: None
    """
    if addressLength < 0:
        sc().error("MemEntry: The addressLength (" + str(addressLength) + ") is negative!")
    else:
        self.addressLength = addressLength
def setUp(self): """ Setting up the logger This syntax will default init it and then change the settings with the __call__() This is needed so that the unit tests can have different settings and not interfere with each other :return: None """ sc()(invVerbosity=4, actionWarning=lambda: sys.exit("warning"), actionError=lambda: sys.exit("error"))
def setAddressesGivenEnd(self, addressEnd):
    """
    Function to set the address length value from an address end value.
    :param addressEnd: End address; must not be smaller than self.addressStart.
    :return: None
    """
    if addressEnd < self.addressStart:
        sc().error("MemEntry: The addressEnd (" + str(addressEnd) + ") is smaller than the addressStart (" + str(self.addressStart) + ")!")
    else:
        # The length is inclusive of both the start and the end address
        self.addressLength = addressEnd - self.addressStart + 1
def __readCategoriesJson(path):
    """
    Function to load a categorisation json file.
    :param path: The path of the file that needs to be read.
    :return: Content of the json file, or None if the file does not exist.
    """
    if not os.path.exists(path):
        sc().warning("There was no " + os.path.basename(path) + " file found, the categorization based on this will be skipped.")
        return None
    return Emma.shared_libs.emma_helper.readJson(path)
def main(): sc().info("Generating UML Class diagrams from the source files...") for sourceFilePath in LIST_OF_SOURCE_FILE_PATHS: sourceFileName = os.path.splitext(os.path.basename(sourceFilePath))[0] cwd = os.path.join( "..", README_CALL_GRAPH_AND_UML_PATH ) # The only way to specify the output directory of pyreverse subprocess.run("pyreverse -AS -o " + README_PICTURE_FORMAT + " " + sourceFilePath + " -p " + sourceFileName, shell=True, cwd=cwd)
def createStandardReports():
    """
    Create Section, Object and ObjectsInSections reports
    :return: None
    """
    # NOTE(review): this function references `self` but declares no `self`
    # parameter — it only works as a nested function inside a method where
    # `self` is in the enclosing scope, otherwise it raises NameError.
    # TODO confirm against the enclosing context.
    consumerCollections = consumerCollections2GlobalList()
    # Creating reports from the consumer collections
    for collectionType in consumerCollections:
        # One report file per consumer collection, named from the project settings
        reportPath = Emma.emma_libs.memoryMap.createReportPath(self.settings.outputPath, self.settings.projectName, collectionType)
        Emma.emma_libs.memoryMap.writeReportToDisk(reportPath, consumerCollections[collectionType])
        sc().info("A report was stored:", os.path.abspath(reportPath))
def main(arguments): """ Converts a log file (GHS run-time map output) to a parsable output for Emma Regarding the map command refer to the GHS devguide chapter 18. :return: None """ #sc(invVerbosity=-1, actionWarning=(lambda: sys.exit(-10) if arguments.Werror is not None else None), actionError=lambda: sys.exit(-10)) sc(invVerbosity=arguments.verbosity, actionError=lambda: sys.exit(-10)) sc().header("Emma Memory and Mapfile Analyser - `map` log file converter ", symbol="/") # Start and display time measurement TIME_START = timeit.default_timer() sc().info("Started processing at", datetime.datetime.now().strftime("%H:%M:%S")) verbosity, out, mapLogFilePath = processArguments(arguments) convertMapLogFileFile(out, mapLogFilePath) # Stop and display time measurement TIME_END = timeit.default_timer() sc().info("Finished job at:", datetime.datetime.now().strftime("%H:%M:%S"), "(duration: " "{0:.2f}".format(TIME_END - TIME_START) + "s)")
def getCandidates(self, filetype: str):
    """
    Find files according to the chosen filetype
    :param filetype: chosen filetype
    :return: list of files that can be processed
    """
    matchIndex = 0
    for candidate in os.listdir(self.__path):
        if filetype not in candidate:
            continue
        # Candidates are numbered starting from 1
        matchIndex += 1
        self.__versionCandidates[matchIndex] = candidate
    if not self.__versionCandidates:
        sc().error("No matching files in " + self.__path)
    return self.__versionCandidates
def main(arguments): """ Emma application :param arguments: parsed arguments :return: None """ # Setup SCout sc(invVerbosity=-1, actionWarning=(lambda: sys.exit(-10) if arguments.Werror is not None else None), actionError=lambda: sys.exit(-10)) sc().header("Emma Memory and Mapfile Analyser", symbol="/") # Start and display time measurement TIME_START = timeit.default_timer() sc().info("Started processing at", datetime.datetime.now().strftime("%H:%M:%S")) memoryManager = Emma.emma_libs.memoryManager.MemoryManager( *processArguments(arguments)) memoryManager.readConfiguration() memoryManager.processMapfiles() memoryManager.createReports() # Stop and display time measurement TIME_END = timeit.default_timer() sc().info("Finished job at:", datetime.datetime.now().strftime("%H:%M:%S"), "(duration: " "{0:.2f}".format(TIME_END - TIME_START) + "s)")
def end2Size(startAddr, endAddr):
    """
    Convert size from start and end address (all in hex)
    :param startAddr: start address (hex string, e.g. "0x10" or "10")
    :param endAddr: end address (hex string)
    :return: size as hex string WITHOUT the "0x" prefix
    """
    if startAddr == endAddr:
        # FIX: previously returned hex(1) == "0x1" while the normal path below
        # returns format(..., "x") without a prefix; keep the output format consistent
        return format(1, "x")
    if int(startAddr, 16) > int(endAddr, 16):
        # FIX: the original message stated the comparison backwards
        sc().error(
            f"Start address ({startAddr}) is bigger than end address ({endAddr})!"
        )
    # NOTE(review): the equal-address case above treats the range as inclusive
    # (size 1) while this difference is exclusive (end - start) — TODO confirm
    # the intended semantics with the callers
    return format(int(endAddr, 16) - int(startAddr, 16), "x")
def appendCategorisedImageToMarkdownOverview(self, markdownFilePath):
    """
    Appends categorisedImage to the markdown file
    :param markdownFilePath: The path of the Markdown file to which the data will be appended to.
    :return: nothing
    """
    sc().info("Appending object summary to overview...")
    categorisedTable = self.__groupCategorisedImage().to_string().replace("\n", "\n ")
    with open(markdownFilePath, 'a') as markdown:
        markdown.write("\n# Modules included in allocated Memory\n")
        markdown.write(" \n " + categorisedTable + "\n")
        markdown.write("\n\n")
def __addMapfilesToConfiguration(mapfilesPath, configuration):
    """
    Function to add the mapfiles to the configuration.
    :param mapfilesPath: Path of the folder where the mapfiles are located.
    :param configuration: Configuration to which the mapfiles need to be added.
    :return: None
    """
    if not os.path.isdir(mapfilesPath):
        sc().error("The mapfiles folder (\"" + mapfilesPath + "\") does not exist!")
    else:
        GhsConfiguration.__addFilesToConfiguration(mapfilesPath, configuration, "mapfiles")
def unifyAddress(address):
    """
    Convert hex or dec address and returns both (in this order)
    :param address: hex (str) or dec (int) address
    :return: (addressHex, addressDec) tuple
    :raises TypeError: if address is neither str nor int
    """
    # FIX: removed the redundant `address is not None` conjuncts — None is never
    # an instance of str or int, so isinstance() alone already excludes it
    if isinstance(address, str):
        address = int(address, 16)
        addressHex = hex(address)
    elif isinstance(address, int):
        addressHex = hex(address)
    else:
        sc().error("unifyAddress(): Address must be either of type int or str!")
        raise TypeError
    return addressHex, address
def __init__(self):
    """
    Compile the regex for one entry of the lower monolith file and map its
    named capture groups onto the generic Groups interface of the base class.
    """
    super().__init__()
    self.pattern = re.compile(r"""
    # ^\s{4}[\.*\w+]+\s+0x[0-9a-f]+\s+[0x]*[0-9a-f]+\s\(\w+\s*/[\w+,\s]+\)
    (?:\s{4})(?P<section>[\.*\w+]+)          # Section
    (?:\s+0x)(?P<address>[0-9a-f]+)          # Base Address
    (?:\s+[0x]*)(?P<size>[0-9a-f]+)          # Size(hex)
    """, re.X)
    # The section name doubles as the entry name
    self.Groups.name = "section"
    self.Groups.section = "section"
    self.Groups.origin = "address"           # address is equivalent to base address
    self.Groups.size = "size"
    # NOTE(review): logging from a constructor is a side effect on every
    # instantiation — presumably this pattern object is created once per run
    sc().info("Preparing lower monolith summary...")
def __checkNumberOfFoundMapfiles(configId, configuration):
    """
    Function to check the number of found mapfiles in a configuration.
    :param configId: The configId the configuration belongs to.
    :param configuration: The configuration in which the found mapfiles need to be checked.
    :return: True if at least one mapfile was found, False otherwise.
    """
    # At least one mapfile matched by the regexes means the check passes
    if configuration["patterns"]["mapfiles"]:
        return True
    sc().warning("No mapfiles found for configID: \"" + configId + "\"!")
    return False
def selectRoot() -> str:
    """
    Prompts the user for the root path of the project for the delta analysis
    :return: project root path
    """
    deltaConfigPath: str = Emma.shared_libs.emma_helper.joinPath("./", DELTA_CONFIG)
    if not os.path.isfile(deltaConfigPath):
        # No delta config present -> ask the user interactively
        rootpath = input("Enter project root path >")
    else:
        rootpath = Emma.shared_libs.emma_helper.readJson(deltaConfigPath)[DELTA_LATEST_PATH]
        sc().info("Using " + rootpath + " as project.")
    Emma.shared_libs.emma_helper.checkIfFolderExists(rootpath)
    return rootpath
def createSpecificMapfileProcesor(compiler, **kwargs):
    """
    A factory for creating an object of one of the subclasses of the SpecificMapfileProcessor class.
    The concrete subclass is selected based on the received compiler name.
    :param compiler: The compiler name.
    :param kwargs: The arguments that will be forwarded to the constructor during the object creation.
    :return: An object of the selected subclass of the SpecificMapfileProcessor.
    """
    if compiler == COMPILER_NAME_GHS:
        return Emma.emma_libs.ghsMapfileProcessor.GhsMapfileProcessor(**kwargs)
    sc().error("Unexpected compiler value: " + compiler)
    return None
def init(self, testCaseName):  # pylint: disable=attribute-defined-outside-init
    # Rationale: This class does not have an __init__() member so the member variables will be created here.
    """
    Creating the environment of the test.
    :param testCaseName: The name of the test case. This will be used to create the output folder with the name of the test case.
    :return: None
    """
    # Setting up the logger
    # This syntax will default init it and then change the settings with the __call__()
    # This is needed so that the unit tests can have different settings and not interfere with each other
    sc()(invVerbosity=4, actionWarning=None, actionError=lambda: sys.exit(-10))

    # Switching to the Emma root folder
    # NOTE(review): this changes the CWD of the whole process; every relative
    # path below (and any later test) depends on this side effect
    os.chdir(os.path.join(os.path.dirname(__file__), "..", ".."))

    # Defining the paths of the folders used during the tests
    self.cmdLineTestRootFolder = os.path.join("tests", "other_files", "test__cmd_line")
    # Defining a path that shall contain the project files
    self.cmdLineTestProjectFolder = os.path.join(self.cmdLineTestRootFolder, "test_project")
    # Defining a path that shall contain the mapfiles
    self.cmdLineTestProjectMapfilesFolder = os.path.join(self.cmdLineTestProjectFolder, MAPFILES)
    # Defining a path for supplements
    self.cmdLineTestProjectSupplementFolder = os.path.join(self.cmdLineTestProjectFolder, SUPPLEMENT)
    # Defining a path that shall contain the output
    self.cmdLineTestOutputFolder = os.path.join("tests", "other_files", "test__cmd_line", testCaseName)
    # Defining a path that shall not exist
    self.nonExistingPath = os.path.join(self.cmdLineTestRootFolder, "this", "directory", "does", "not", "exist")

    # Checking whether the root folder still exist from the previous run, if it does, we shall not erase it, but ask the user to do it manually
    self.assertFalse(os.path.isdir(self.cmdLineTestRootFolder), "The temporary folder (\"" + self.cmdLineTestRootFolder + "\") still exists! Please delete it manually.")

    # Defining the location of the source test_project
    sourceTestProjectFolder = os.path.join("doc", "test_project")

    # Creating the root folder
    os.makedirs(self.cmdLineTestProjectFolder)
    os.makedirs(self.cmdLineTestProjectSupplementFolder)
    # Copying the project files (only the .json configuration files)
    for file in os.listdir(sourceTestProjectFolder):
        if os.path.splitext(file)[-1].lower() == ".json":
            shutil.copy(os.path.join(sourceTestProjectFolder, file), self.cmdLineTestProjectFolder)
    # Copying the mapfiles
    shutil.copytree(os.path.join(sourceTestProjectFolder, MAPFILES), os.path.join(self.cmdLineTestProjectFolder, MAPFILES))
    # Creating the output folder for the results with the test case name
    os.makedirs(self.cmdLineTestOutputFolder)