def getHDRLabsDocumentation(input, output):
    """
    Extracts sIBL_GUI Documentation body for HDRLabs.com.

    :param input: Input file to extract documentation body.
    :type input: unicode
    :param output: Output html file.
    :type output: unicode
    :return: Definition success.
    :rtype: bool
    """

    LOGGER.info("{0} | Extracting 'body' tag content from '{1}' file!".format(
        getHDRLabsDocumentation.__name__, input))
    file = File(input)
    file.cache()

    LOGGER.info("{0} | Processing 'body' data!".format(getHDRLabsDocumentation.__name__))
    content = []
    skipLine = True
    for line in file.content:
        if re.search(r"<body>", line):
            skipLine = False
        elif re.search(r"</body>", line):
            skipLine = True

        not skipLine and content.append("{0}\n".format(line.replace("\t", "", 2)))

    file = File(output)
    file.content = content
    file.write()

    return True

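# Hypothetical usage sketch: the helper name and file paths below are illustrative assumptions,
# not part of the original utilities. Only the markup between '<body>' and '</body>' is kept.
def _exampleGetHDRLabsDocumentation():
    getHDRLabsDocumentation("sIBL_GUI_Manual.html", "sIBL_GUI_Manual_Body.html")
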
def reStructuredTextToHtml(input, output, cssFile):
    """
    Outputs a reStructuredText file to html.

    :param input: Input reStructuredText file to convert.
    :type input: unicode
    :param output: Output html file.
    :type output: unicode
    :param cssFile: Css file.
    :type cssFile: unicode
    :return: Definition success.
    :rtype: bool
    """

    LOGGER.info("{0} | Converting '{1}' reStructuredText file to html!".format(
        reStructuredTextToHtml.__name__, input))
    os.system("{0} --stylesheet-path='{1}' '{2}' > '{3}'".format(
        RST2HTML, os.path.join(os.path.dirname(__file__), cssFile), input, output))

    LOGGER.info("{0} | Formatting html file!".format("Tidy"))
    os.system("tidy -config {0} -m '{1}'".format(
        os.path.join(os.path.dirname(__file__), TIDY_SETTINGS_FILE), output))

    file = File(output)
    file.cache()
    LOGGER.info("{0} | Replacing spaces with tabs!".format(reStructuredTextToHtml.__name__))
    file.content = [line.replace(" " * 4, "\t") for line in file.content]
    file.write()

    return True

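# Hypothetical usage sketch (illustrative paths and helper name, not from the original code):
# convert the manual from reStructuredText to html with a given stylesheet, then extract its
# body for HDRLabs.com. 'rst2html' and 'tidy' must be available on the system for the
# os.system calls above to succeed.
def _exampleBuildHDRLabsPage():
    reStructuredTextToHtml("sIBL_GUI_Manual.rst", "sIBL_GUI_Manual.html", "css/sphinx.css")
    getHDRLabsDocumentation("sIBL_GUI_Manual.html", "sIBL_GUI_Manual_Body.html")
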
def listImports(packages, filtersIn, filtersOut):
    """
    Lists Application imports.

    :param packages: Packages.
    :type packages: list
    :param filtersIn: Filters in.
    :type filtersIn: tuple or list
    :param filtersOut: Filters out.
    :type filtersOut: tuple or list
    :return: Imports.
    :rtype: list
    """

    imports = set(IMPORTS)
    for package in packages:
        path = __import__(package).__path__.pop()
        for file in sorted(list(foundations.walkers.filesWalker(path, filtersIn, filtersOut))):
            source = File(file)
            source.cache()
            for line in source.content:
                if not re.search(r"oncilla|foundations|manager|umbra|sibl_gui", line):
                    search = re.search(
                        r"^\s*import\s*(?P<moduleA>[\w+\.]+)|^\s*from\s*(?P<moduleB>[\w+\.]+)\s+import", line)
                    if search:
                        statement = search.group("moduleA") or search.group("moduleB")
                        statement != "_" and imports.add(statement)
    return imports

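# Hypothetical usage sketch (the helper name, package list and filters are assumptions for
# illustration): gather third party import statements from the given packages and write them
# out as a simple requirements listing.
def _exampleListImports():
    imports = listImports(("foundations", "manager", "umbra", "sibl_gui"),
                          filtersIn=(r"\.py$",),
                          filtersOut=(r"defaultScript\.py",))
    requirements = File("requirements.txt")
    requirements.content = ["{0}\n".format(statement) for statement in sorted(imports)]
    requirements.write()
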
def getHDRLabsDocumentation(fileIn, fileOut):
    """
    This definition extracts sIBL_GUI Documentation body for HDRLabs.com.

    :param fileIn: File to convert. ( String )
    :param fileOut: Output file. ( String )
    """

    LOGGER.info("{0} | Extracting 'body' tag content from '{1}' file!".format(
        getHDRLabsDocumentation.__name__, fileIn))
    file = File(fileIn)
    file.cache()

    LOGGER.info("{0} | Processing 'body' data!".format(getHDRLabsDocumentation.__name__))
    content = []
    skipLine = True
    for line in file.content:
        if re.search(r"<body>", line):
            skipLine = False
        elif re.search(r"</body>", line):
            skipLine = True

        not skipLine and content.append("{0}\n".format(line.replace("\t", "", 2)))

    file = File(fileOut)
    file.content = content
    file.write()

def sliceReStructuredText(input, output):
    """
    Slices given reStructuredText file.

    :param input: ReStructuredText file to slice.
    :type input: unicode
    :param output: Directory to output sliced reStructuredText files.
    :type output: unicode
    :return: Definition success.
    :rtype: bool
    """

    LOGGER.info("{0} | Slicing '{1}' file!".format(sliceReStructuredText.__name__, input))
    file = File(input)
    file.cache()

    slices = OrderedDict()
    for i, line in enumerate(file.content):
        search = re.search(r"^\.\. \.(\w+)", line)
        if search:
            slices[search.groups()[0]] = i + SLICE_ATTRIBUTE_INDENT

    index = 0
    for slice, sliceStart in slices.iteritems():
        sliceFile = File(os.path.join(output, "{0}.{1}".format(slice, OUTPUT_FILES_EXTENSION)))
        LOGGER.info("{0} | Outputting '{1}' file!".format(sliceReStructuredText.__name__, sliceFile.path))
        sliceEnd = (index < (len(slices.values()) - 1) and
                    slices.values()[index + 1] - SLICE_ATTRIBUTE_INDENT or len(file.content))

        for i in range(sliceStart, sliceEnd):
            skipLine = False
            for item in CONTENT_DELETION:
                if re.search(item, file.content[i]):
                    LOGGER.info("{0} | Skipping Line '{1}' with '{2}' content!".format(
                        sliceReStructuredText.__name__, i, item))
                    skipLine = True
                    break

            if skipLine:
                continue

            line = file.content[i]
            for pattern, value in STATEMENT_SUBSTITUTE.iteritems():
                line = re.sub(pattern, value, line)

            search = re.search(r"- `[\w ]+`_ \(([\w\.]+)\)", line)
            if search:
                LOGGER.info("{0} | Updating Line '{1}' link: '{2}'!".format(
                    sliceReStructuredText.__name__, i, search.groups()[0]))
                line = "- :ref:`{0}`\n".format(search.groups()[0])
            sliceFile.content.append(line)

        sliceFile.write()
        index += 1

    return True

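# Hypothetical usage sketch (illustrative paths, not from the original code): split the manual
# into one reStructuredText file per '.. .sectionName' marker, written into the Sphinx content
# directory so each section becomes an individual help page.
def _exampleSliceReStructuredText():
    sliceReStructuredText("sIBL_GUI_Manual.rst", "documentation/help")
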
def buildTocTree(title, input, output, contentDirectory):
    """
    Builds Sphinx documentation table of content tree file.

    :param title: Package title.
    :type title: unicode
    :param input: Input file to convert.
    :type input: unicode
    :param output: Output file.
    :type output: unicode
    :param contentDirectory: Directory containing the content to be included in the table of content.
    :type contentDirectory: unicode
    :return: Definition success.
    :rtype: bool
    """

    LOGGER.info("{0} | Building Sphinx documentation index '{1}' file!".format(buildTocTree.__name__, output))
    file = File(input)
    file.cache()
    existingFiles = [foundations.strings.getSplitextBasename(item)
                     for item in glob.glob("{0}/*{1}".format(contentDirectory, FILES_EXTENSION))]
    relativeDirectory = contentDirectory.replace("{0}/".format(os.path.dirname(output)), "")

    tocTree = ["\n"]
    for line in file.content:
        search = re.search(r"`([a-zA-Z_ ]+)`_", line)
        if not search:
            continue

        item = search.groups()[0]
        code = "{0}{1}".format(item[0].lower(), item.replace(" ", "")[1:])
        if code in existingFiles:
            link = "{0}/{1}".format(relativeDirectory, code)
            data = "{0}{1}{2} <{3}>\n".format(" ", " " * line.index("-"), item, link)
            LOGGER.info("{0} | Adding '{1}' entry to Toc Tree!".format(
                buildTocTree.__name__, data.replace("\n", "")))
            tocTree.append(data)
    tocTree.append("\n")

    TOCTREE_TEMPLATE_BEGIN[0] = TOCTREE_TEMPLATE_BEGIN[0].format(title)
    TOCTREE_TEMPLATE_BEGIN[1] = TOCTREE_TEMPLATE_BEGIN[1].format("=" * len(TOCTREE_TEMPLATE_BEGIN[0]))

    content = TOCTREE_TEMPLATE_BEGIN
    content.extend(tocTree)
    content.extend(TOCTREE_TEMPLATE_END)

    file = File(output)
    file.content = content
    file.write()

    return True

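# Hypothetical usage sketch (illustrative paths): build the Sphinx table of content tree from
# the files previously produced by sliceReStructuredText, typically run right after slicing.
def _exampleBuildTocTree():
    buildTocTree("sIBL_GUI", "sIBL_GUI_Manual.rst", "documentation/index.rst", "documentation/help")
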
def sliceDocumentation(fileIn, outputDirectory):
    """
    This definition slices given documentation file.

    :param fileIn: File to convert. ( String )
    :param outputDirectory: Output directory. ( String )
    """

    LOGGER.info("{0} | Slicing '{1}' file!".format(sliceDocumentation.__name__, fileIn))
    file = File(fileIn)
    file.cache()

    slices = OrderedDict()
    for i, line in enumerate(file.content):
        search = re.search(r"^\.\. \.(\w+)", line)
        if search:
            slices[search.groups()[0]] = i + SLICE_ATTRIBUTE_INDENT

    index = 0
    for slice, sliceStart in slices.iteritems():
        sliceFile = File(os.path.join(outputDirectory, "{0}.{1}".format(slice, OUTPUT_FILES_EXTENSION)))
        LOGGER.info("{0} | Outputting '{1}' file!".format(sliceDocumentation.__name__, sliceFile.path))
        sliceEnd = (index < (len(slices.values()) - 1) and
                    slices.values()[index + 1] - SLICE_ATTRIBUTE_INDENT or len(file.content))

        for i in range(sliceStart, sliceEnd):
            skipLine = False
            for item in CONTENT_DELETION:
                if re.search(item, file.content[i]):
                    LOGGER.info("{0} | Skipping Line '{1}' with '{2}' content!".format(
                        sliceDocumentation.__name__, i, item))
                    skipLine = True
                    break

            if skipLine:
                continue

            line = file.content[i]
            for pattern, value in CONTENT_SUBSTITUTIONS.iteritems():
                line = re.sub(pattern, value, line)

            search = re.search(r"- `[\w ]+`_ \(([\w\.]+)\)", line)
            if search:
                LOGGER.info("{0} | Updating Line '{1}' link: '{2}'!".format(
                    sliceDocumentation.__name__, i, search.groups()[0]))
                line = "- :ref:`{0}`\n".format(search.groups()[0])
            sliceFile.content.append(line)

        sliceFile.write()
        index += 1

def testUncache(self):
    """
    Tests :meth:`foundations.io.File.uncache` method.
    """

    ioFile = File(TEXT_FILE)
    ioFile.cache()
    self.assertListEqual(ioFile.content, FILE_CONTENT)
    ioFile.uncache()
    self.assertListEqual(ioFile.content, [])

def getSphinxDocumentationTocTree(title, fileIn, fileOut, contentDirectory):
    """
    This definition gets Sphinx documentation index file.

    :param title: Package title. ( String )
    :param fileIn: File to convert. ( String )
    :param fileOut: Output file. ( String )
    :param contentDirectory: Content directory. ( String )
    """

    LOGGER.info("{0} | Building Sphinx documentation index '{1}' file!".format(
        getSphinxDocumentationTocTree.__name__, fileOut))
    file = File(fileIn)
    file.cache()
    existingFiles = [foundations.strings.getSplitextBasename(item)
                     for item in glob.glob("{0}/*{1}".format(contentDirectory, FILES_EXTENSION))]
    relativeDirectory = contentDirectory.replace("{0}/".format(os.path.dirname(fileOut)), "")

    tocTree = ["\n"]
    for line in file.content:
        search = re.search(r"`([a-zA-Z_ ]+)`_", line)
        if not search:
            continue

        item = search.groups()[0]
        code = "{0}{1}".format(item[0].lower(), item.replace(" ", "")[1:])
        if code in existingFiles:
            link = "{0}/{1}".format(relativeDirectory, code)
            data = "{0}{1}{2} <{3}>\n".format(" ", " " * line.index("-"), item, link)
            LOGGER.info("{0} | Adding '{1}' entry to Toc Tree!".format(
                getSphinxDocumentationTocTree.__name__, data.replace("\n", "")))
            tocTree.append(data)
    tocTree.append("\n")

    TOCTREE_TEMPLATE_BEGIN[0] = TOCTREE_TEMPLATE_BEGIN[0].format(title)
    TOCTREE_TEMPLATE_BEGIN[1] = TOCTREE_TEMPLATE_BEGIN[1].format("=" * len(TOCTREE_TEMPLATE_BEGIN[0]))

    content = TOCTREE_TEMPLATE_BEGIN
    content.extend(tocTree)
    content.extend(TOCTREE_TEMPLATE_END)

    file = File(fileOut)
    file.content = content
    file.write()

def testWrite(self):
    """
    This method tests :meth:`foundations.io.File.write` method.
    """

    fileDescriptor, path = tempfile.mkstemp()
    ioFile = File(path)
    self.assertIsInstance(ioFile.content, list)
    ioFile.content = FILE_CONTENT
    writeSuccess = ioFile.write()
    self.assertTrue(writeSuccess)
    ioFile.cache()
    self.assertListEqual(ioFile.content, FILE_CONTENT)
    os.close(fileDescriptor)

def testClear(self):
    """
    Tests :meth:`foundations.io.File.clear` method.
    """

    fileDescriptor, path = tempfile.mkstemp()
    ioFile = File(unicode(path))
    self.assertIsInstance(ioFile.content, list)
    ioFile.content = FILE_CONTENT
    ioFile.write()
    self.assertTrue(ioFile.clear())
    ioFile.cache()
    self.assertListEqual(ioFile.content, [])
    os.close(fileDescriptor)

def testAppend(self):
    """
    This method tests :meth:`foundations.io.File.append` method.
    """

    fileDescriptor, path = tempfile.mkstemp()
    ioFile = File(path)
    self.assertIsInstance(ioFile.content, list)
    ioFile.content = FILE_CONTENT
    ioFile.write()
    append = ioFile.append()
    self.assertTrue(append)
    ioFile.cache()
    self.assertListEqual(ioFile.content, FILE_CONTENT + FILE_CONTENT)
    os.close(fileDescriptor)

def testCache(self):
    """
    Tests :meth:`foundations.io.File.cache` method.
    """

    ioFile = File(TEXT_FILE)
    self.assertIsInstance(ioFile.content, list)
    cacheSuccess = ioFile.cache()
    self.assertTrue(cacheSuccess)
    self.assertIsInstance(ioFile.content, list)
    self.assertListEqual(ioFile.content, FILE_CONTENT)

def reStructuredTextToHtml(fileIn, fileOut):
    """
    This definition outputs a reStructuredText file to html.

    :param fileIn: File to convert. ( String )
    :param fileOut: Output file. ( String )
    """

    LOGGER.info("{0} | Converting '{1}' reStructuredText file to html!".format(
        reStructuredTextToHtml.__name__, fileIn))
    os.system("{0} --stylesheet-path='{1}' '{2}' > '{3}'".format(
        RST2HTML, os.path.join(os.path.dirname(__file__), CSS_FILE), fileIn, fileOut))

    LOGGER.info("{0} | Formatting html file!".format("Tidy"))
    os.system("tidy -config {0} -m '{1}'".format(
        os.path.join(os.path.dirname(__file__), TIDY_SETTINGS_FILE), fileOut))

    file = File(fileOut)
    file.cache()
    LOGGER.info("{0} | Replacing spaces with tabs!".format(reStructuredTextToHtml.__name__))
    file.content = [line.replace(" " * 4, "\t") for line in file.content]
    file.write()

def applyPatch(self, patch):
    """
    Applies given patch.

    :param patch: Patch.
    :type patch: Patch
    :return: Method success.
    :rtype: bool
    """

    historyFile = File(self.__historyFile)
    patchesHistory = historyFile.cache() and [line.strip() for line in historyFile.content] or []

    if patch.uid not in patchesHistory:
        LOGGER.debug("> Applying '{0}' patch!".format(patch.name))
        if patch.apply():
            historyFile.content = ["{0}\n".format(patch.uid)]
            historyFile.append()
        else:
            raise umbra.exceptions.PatchApplyError("{0} | '{1}' patch failed to apply!".format(
                self.__class__.__name__, patch.path))
    else:
        LOGGER.debug("> '{0}' patch is already applied!".format(patch.name))

    return True

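# Hypothetical usage sketch (the helper name and the iterable of patch objects are assumptions
# for illustration): each patch is applied at most once, since its uid is appended to the
# history file on success and checked on subsequent runs.
def _exampleApplyPatches(patchesManager, patches):
    for patch in patches:
        patchesManager.applyPatch(patch)
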
def getSphinxDocumentationApi(packages, cloneDirectory, outputDirectory, apiFile):
    """
    This definition gets Sphinx documentation API.

    :param packages: Packages. ( String )
    :param cloneDirectory: Source clone directory. ( String )
    :param outputDirectory: Content directory. ( String )
    :param apiFile: API file. ( String )
    """

    LOGGER.info("{0} | Building Sphinx documentation API!".format(getSphinxDocumentationApi.__name__))

    if os.path.exists(cloneDirectory):
        shutil.rmtree(cloneDirectory)
    os.makedirs(cloneDirectory)

    packagesModules = {"apiModules": [], "testsModules": []}
    for package in packages.split(","):
        package = __import__(package)
        path = foundations.common.getFirstItem(package.__path__)
        sourceDirectory = os.path.dirname(path)

        for file in sorted(list(foundations.walkers.filesWalker(
                sourceDirectory, filtersIn=("{0}.*\.ui$".format(path),)))):
            LOGGER.info("{0} | Ui file: '{1}'".format(getSphinxDocumentationApi.__name__, file))
            targetDirectory = os.path.dirname(file).replace(sourceDirectory, "")
            directory = "{0}{1}".format(cloneDirectory, targetDirectory)
            if not foundations.common.pathExists(directory):
                os.makedirs(directory)
            source = os.path.join(directory, os.path.basename(file))
            shutil.copyfile(file, source)

        modules = []
        for file in sorted(list(foundations.walkers.filesWalker(
                sourceDirectory, filtersIn=("{0}.*\.py$".format(path),), filtersOut=EXCLUDED_PYTHON_MODULES))):
            LOGGER.info("{0} | Python file: '{1}'".format(getSphinxDocumentationApi.__name__, file))
            module = "{0}.{1}".format((".".join(os.path.dirname(file).replace(sourceDirectory, "").split("/"))),
                                      foundations.strings.getSplitextBasename(file)).strip(".")
            LOGGER.info("{0} | Module name: '{1}'".format(getSphinxDocumentationApi.__name__, module))
            directory = os.path.dirname(os.path.join(cloneDirectory, module.replace(".", "/")))
            if not foundations.common.pathExists(directory):
                os.makedirs(directory)
            source = os.path.join(directory, os.path.basename(file))
            shutil.copyfile(file, source)

            sourceFile = File(source)
            sourceFile.cache()
            trimFromIndex = trimEndIndex = None
            inMultilineString = inDecorator = False
            for i, line in enumerate(sourceFile.content):
                if re.search(r"__name__ +\=\= +\"__main__\"", line):
                    trimFromIndex = i
                for pattern, value in CONTENT_SUBSTITUTIONS.iteritems():
                    if re.search(pattern, line):
                        sourceFile.content[i] = re.sub(pattern, value, line)

                strippedLine = line.strip()
                if re.search(r"^\"\"\"", strippedLine):
                    inMultilineString = not inMultilineString

                if inMultilineString:
                    continue

                if re.search(r"^@\w+", strippedLine) and \
                        not re.search(r"@property", strippedLine) and \
                        not re.search(r"^@\w+\.setter", strippedLine) and \
                        not re.search(r"^@\w+\.deleter", strippedLine):
                    inDecorator = True
                    indent = re.search(r"^([ \t]*)", line)

                if re.search(r"^[ \t]*def \w+", sourceFile.content[i]) or \
                        re.search(r"^[ \t]*class \w+", sourceFile.content[i]):
                    inDecorator = False

                if not inDecorator:
                    continue

                sourceFile.content[i] = "{0}{1} {2}".format(indent.groups()[0], DECORATORS_COMMENT_MESSAGE, line)

            if trimFromIndex:
                LOGGER.info("{0} | Trimming '__main__' statements!".format(getSphinxDocumentationApi.__name__))
                content = [sourceFile.content[i] for i in range(trimFromIndex)]
                content.append("{0}\n".format(STATEMENTS_UPDATE_MESSAGGE))
                sourceFile.content = content
            sourceFile.write()

            if "__init__.py" in file:
                continue

            rstFilePath = "{0}{1}".format(module, FILES_EXTENSION)
            LOGGER.info("{0} | Building API file: '{1}'".format(getSphinxDocumentationApi.__name__, rstFilePath))
            rstFile = File(os.path.join(outputDirectory, rstFilePath))
            header = ["_`{0}`\n".format(module),
                      "==={0}\n".format("=" * len(module)),
                      "\n",
                      ".. automodule:: {0}\n".format(module),
                      "\n"]
            rstFile.content.extend(header)

            functions = OrderedDict()
            classes = OrderedDict()
            moduleAttributes = OrderedDict()
            for member, object in moduleBrowser._readmodule(module, [source, ]).iteritems():
                if object.__class__ == moduleBrowser.Function:
                    if not member.startswith("_"):
                        functions[member] = [".. autofunction:: {0}\n".format(member)]
                elif object.__class__ == moduleBrowser.Class:
                    classes[member] = [".. autoclass:: {0}\n".format(member),
                                       " :show-inheritance:\n",
                                       " :members:\n"]
                elif object.__class__ == moduleBrowser.Global:
                    if not member.startswith("_"):
                        moduleAttributes[member] = [".. attribute:: {0}.{1}\n".format(module, member)]

            moduleAttributes and rstFile.content.append("Module Attributes\n-----------------\n\n")
            for moduleAttribute in moduleAttributes.itervalues():
                rstFile.content.extend(moduleAttribute)
                rstFile.content.append("\n")

            functions and rstFile.content.append("Functions\n---------\n\n")
            for function in functions.itervalues():
                rstFile.content.extend(function)
                rstFile.content.append("\n")

            classes and rstFile.content.append("Classes\n-------\n\n")
            for class_ in classes.itervalues():
                rstFile.content.extend(class_)
                rstFile.content.append("\n")

            rstFile.write()
            modules.append(module)

        packagesModules["apiModules"].extend([module for module in modules if not "tests" in module])
        packagesModules["testsModules"].extend([module for module in modules if "tests" in module])

    apiFile = File(apiFile)
    apiFile.content.extend(TOCTREE_TEMPLATE_BEGIN)
    for module in packagesModules["apiModules"]:
        apiFile.content.append(" {0} <{1}>\n".format(module, "api/{0}".format(module)))
    for module in packagesModules["testsModules"]:
        apiFile.content.append(" {0} <{1}>\n".format(module, "api/{0}".format(module)))
    apiFile.content.extend(TOCTREE_TEMPLATE_END)
    apiFile.write()
