Example 1
def listImports(packages, filtersIn, filtersOut):
	"""
	Lists Application imports.

	:param packages: Packages.
	:type packages: list
	:param filtersIn: Filters in.
	:type filtersIn: tuple or list
	:param filtersOut: Filters out.
	:type filtersOut: tuple or list
	:return: Imports.
	:rtype: list
	"""

	imports = set(IMPORTS)
	for package in packages:
		path = __import__(package).__path__.pop()
		for file in sorted(list(foundations.walkers.filesWalker(path, filtersIn, filtersOut))):
			source = File(file)
			source.cache()
			for line in source.content:
				if not re.search("oncilla|foundations|manager|umbra|sibl_gui", line):
					search = re.search("^\s*import\s*(?P<moduleA>[\w+\.]+)|^\s*from\s*(?P<moduleB>[\w+\.]+)\s+import",
									   line)
					if search:
						statement = search.group("moduleA") or search.group("moduleB")
						statement != "_" and imports.add(statement)
	return imports
def bleach(file):
	"""
	Sanitizes given python module.

	:param file: Python module file.
	:type file: unicode
	:return: Definition success.
	:rtype: bool
	"""

	LOGGER.info("{0} | Sanitizing '{1}' python module!".format(__name__, file))

	sourceFile = File(file)
	content = sourceFile.read()
	for pattern in STATEMENT_SUBSTITUTE:
		matches = [match for match in re.finditer(pattern, content, re.DOTALL)]

		offset = 0
		for match in matches:
			start, end = match.start("bleach"), match.end("bleach")
			substitution = "{0}{1}".format(STATEMENT_UPDATE_MESSAGE,
										   re.sub("\n", "\n{0}".format(STATEMENT_UPDATE_MESSAGE),
												  match.group("bleach")))
			content = "".join((content[0: start + offset],
							   substitution,
							   content[end + offset:]))
			offset += len(substitution) - len(match.group("bleach"))

	sourceFile.content = [content]
	sourceFile.write()

	return True
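
A note on the pattern above: bleach() rewrites matched regions of the module text while iterating over matches found against the original string, so it keeps a running offset to compensate for the length change of each substitution. The sketch below shows the same offset-tracking technique on a made-up pattern and comment prefix; PATTERN and PREFIX are illustrative placeholders, not the module's STATEMENT_SUBSTITUTE or STATEMENT_UPDATE_MESSAGE constants.

import re

# Offset-tracking substitution, as in bleach() above; PATTERN and PREFIX are
# illustrative placeholders only.
PATTERN = r"(?P<bleach>if __name__ == \"__main__\":(?:\n[ \t]+.*)*)"
PREFIX = "# "

def comment_out(content):
    offset = 0
    for match in list(re.finditer(PATTERN, content)):
        start, end = match.start("bleach"), match.end("bleach")
        substitution = PREFIX + match.group("bleach").replace("\n", "\n" + PREFIX)
        content = content[:start + offset] + substitution + content[end + offset:]
        offset += len(substitution) - len(match.group("bleach"))
    return content

print(comment_out('import sys\n\nif __name__ == "__main__":\n    sys.exit(0)\n'))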
Example 3
def listImports(packages, filtersIn, filtersOut):
    """
	Lists Application imports.

	:param packages: Packages.
	:type packages: list
	:param filtersIn: Filters in.
	:type filtersIn: tuple or list
	:param filtersOut: Filters out.
	:type filtersOut: tuple or list
	:return: Imports.
	:rtype: list
	"""

    imports = set(IMPORTS)
    for package in packages:
        path = __import__(package).__path__.pop()
        for file in sorted(
                list(
                    foundations.walkers.filesWalker(path, filtersIn,
                                                    filtersOut))):
            source = File(file)
            source.cache()
            for line in source.content:
                if not re.search("oncilla|foundations|manager|umbra|sibl_gui",
                                 line):
                    search = re.search(
                        "^\s*import\s*(?P<moduleA>[\w+\.]+)|^\s*from\s*(?P<moduleB>[\w+\.]+)\s+import",
                        line)
                    if search:
                        statement = search.group("moduleA") or search.group(
                            "moduleB")
                        statement != "_" and imports.add(statement)
    return imports
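
The import-matching regular expression used by listImports() relies on two named groups, one for plain "import" statements and one for "from ... import" statements; whichever group matched yields the module name. A quick standalone check, with arbitrary sample lines:

import re

pattern = r"^\s*import\s*(?P<moduleA>[\w+\.]+)|^\s*from\s*(?P<moduleB>[\w+\.]+)\s+import"
for line in ("import PyQt4.QtCore", "from collections import OrderedDict"):
    match = re.search(pattern, line)
    # Exactly one of the two groups is set, depending on the statement form.
    print(match.group("moduleA") or match.group("moduleB"))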
Example 4
	def testReadAll(self):
		"""
		This method tests :meth:`foundations.io.File.readAll` method.
		"""

		ioFile = File(TEST_FILE)
		self.assertIsInstance(ioFile.content, list)
		content = ioFile.readAll()
		self.assertIsInstance(ioFile.content, list)
		self.assertEqual(content, "".join(FILE_CONTENT))
Example 5
    def testRead(self):
        """
		Tests :meth:`foundations.io.File.read` method.
		"""

        ioFile = File(TEXT_FILE)
        self.assertIsInstance(ioFile.content, list)
        content = ioFile.read()
        self.assertIsInstance(ioFile.content, list)
        self.assertEqual(content, "".join(FILE_CONTENT))
Example 6
def getHDRLabsDocumentation(fileIn, fileOut):
    """
	This definition extracts sIBL_GUI Documentation body for HDRLabs.com.

	:param fileIn: File to convert. ( String )
	:param fileOut: Output file. ( String )
	"""

    LOGGER.info("{0} | Extracting 'body' tag content from {1}' file!".format(
        getHDRLabsDocumentation.__name__, fileIn))
    file = File(fileIn)
    file.cache()

    LOGGER.info("{0} | Processing 'body' data!".format(
        getHDRLabsDocumentation.__name__))
    content = []
    skipLine = True
    for line in file.content:
        if re.search(r"<body>", line):
            skipLine = False
        elif re.search(r"</body>", line):
            skipLine = True

        not skipLine and content.append("{0}\n".format(
            line.replace("\t", "", 2)))

    file = File(fileOut)
    file.content = content
    file.write()
Example 7
def getHDRLabsDocumentation(input, output):
    """
	Extracts sIBL_GUI Documentation body for HDRLabs.com.

	:param input: Input file to extract documentation body.
	:type input: unicode
	:param output: Output html file.
	:type output: unicode
	:return: Definition success.
	:rtype: bool
	"""

    LOGGER.info("{0} | Extracting 'body' tag content from {1}' file!".format(
        getHDRLabsDocumentation.__name__, input))
    file = File(input)
    file.cache()

    LOGGER.info("{0} | Processing 'body' data!".format(
        getHDRLabsDocumentation.__name__))
    content = []
    skipLine = True
    for line in file.content:
        if re.search(r"<body>", line):
            skipLine = False
        elif re.search(r"</body>", line):
            skipLine = True

        not skipLine and content.append("{0}\n".format(
            line.replace("\t", "", 2)))

    file = File(output)
    file.content = content
    file.write()

    return True
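
The body extraction above works by toggling a skipLine flag: skipping starts on, stops on the line containing <body> (which is therefore kept) and resumes on </body> (which is dropped). A minimal, self-contained illustration with made-up markup:

import re

html = ["<html>", "<body>", "\t\t<p>Hello</p>", "</body>", "</html>"]
content = []
skipLine = True
for line in html:
    if re.search(r"<body>", line):
        skipLine = False
    elif re.search(r"</body>", line):
        skipLine = True
    if not skipLine:
        # Two leading tabs are stripped, as in the definitions above.
        content.append("{0}\n".format(line.replace("\t", "", 2)))
print(content)  # ['<body>\n', '<p>Hello</p>\n']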
Example 8
    def testCache(self):
        """
		Tests :meth:`foundations.io.File.cache` method.
		"""

        ioFile = File(TEXT_FILE)
        self.assertIsInstance(ioFile.content, list)
        cacheSuccess = ioFile.cache()
        self.assertTrue(cacheSuccess)
        self.assertIsInstance(ioFile.content, list)
        self.assertListEqual(ioFile.content, FILE_CONTENT)
Example 9
	def testRead(self):
		"""
		This method tests :meth:`foundations.io.File.read` method.
		"""

		ioFile = File(TEST_FILE)
		self.assertIsInstance(ioFile.content, list)
		readSuccess = ioFile.read()
		self.assertTrue(readSuccess)
		self.assertIsInstance(ioFile.content, list)
		self.assertListEqual(ioFile.content, FILE_CONTENT)
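
Taken together, the tests in these examples document two revisions of foundations.io.File: in both, content is always a list of lines and write()/append()/clear() return success booleans; in the older revision read() loads the file and returns a boolean while readAll() returns the joined text (Examples 4 and 9), whereas in the newer one cache() does the loading and read() returns the text (Examples 5 and 8). The class below is only a stand-in reconstructed from those tests, not the actual foundations.io implementation.

import codecs

class SimpleFile(object):
    """Minimal stand-in modelled on the behaviour the tests above exercise."""

    def __init__(self, path):
        self.path = path
        self.content = []                      # always a list of lines

    def cache(self):
        # Loads the file into self.content and returns a success boolean.
        with codecs.open(self.path, "r", "utf-8") as handle:
            self.content = handle.readlines()
        return True

    def uncache(self):
        self.content = []

    def read(self):
        # Newer revision behaviour: returns the whole file as a single string.
        self.cache()
        return "".join(self.content)

    readAll = read                             # older revision name for the same call

    def write(self):
        with codecs.open(self.path, "w", "utf-8") as handle:
            handle.writelines(self.content)
        return True

    def append(self):
        with codecs.open(self.path, "a", "utf-8") as handle:
            handle.writelines(self.content)
        return True

    def clear(self):
        self.content = []
        return self.write()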
Example 10
	def outputLoaderScript( self ) :
		'''
		This Method Output The Loader Script.
		
		@return: Output Success. ( Boolean )
		'''

		LOGGER.debug( "> Initializing Loader Script Output." )

		selectedTemplates = self._coreTemplatesOutliner.getSelectedTemplates()
		if selectedTemplates and len( selectedTemplates ) != 1:
			messageBox.messageBox( "Information", "Information", "{0} | Multiple Selected Templates, '{1}' Will Be Used !".format( self.__class__.__name__, selectedTemplates[0]._datas.name ) )

		template = selectedTemplates and selectedTemplates[0] or None

		if not template :
			raise foundations.exceptions.UserError, "{0} | In Order To Output The Loader Script, You Need To Select A Template !".format( self.__class__.__name__ )

		if not os.path.exists( template._datas.path ) :
			raise OSError, "{0} | '{1}' Template File Doesn't Exists !".format( self.__class__.__name__, template._datas.name )

		selectedIblSet = self._coreDatabaseBrowser.getSelectedItems()
		iblSet = selectedIblSet and selectedIblSet[0] or None

		if not iblSet :
			raise foundations.exceptions.UserError, "{0} | In Order To Output The Loader Script, You Need To Select A Set !".format( self.__class__.__name__ )

		if not os.path.exists( iblSet._datas.path ) :
			raise OSError, "{0} | '{1}' Ibl Set File Doesn't Exists !".format( self.__class__.__name__, iblSet._datas.name )

		self._overrideKeys = self.getDefaultOverrideKeys()

		for component in self._container.componentsManager.getComponents() :
			profile = self._container._componentsManager.components[component]
			interface = self._container.componentsManager.getInterface( component )
			if interface.activated and profile.name != self.name :
				hasattr( interface, "getOverrideKeys" ) and interface.getOverrideKeys()

		if self._container.parameters.loaderScriptsOutputDirectory :
			if os.path.exists( self._container.parameters.loaderScriptsOutputDirectory ) :
				loaderScript = File( os.path.join( self._container.parameters.loaderScriptsOutputDirectory, template._datas.outputScript ) )
			else :
				raise OSError, "{0} | '{1}' Loader Script Output Directory Doesn't Exists !".format( self.__class__.__name__, self._container.parameters.loaderScriptsOutputDirectory )
		else :
			loaderScript = File( os.path.join( self._ioDirectory, template._datas.outputScript ) )

		LOGGER.debug( "> Loader Script Output File Path : '{0}'.".format( loaderScript.file ) )

		loaderScript.content = self.getLoaderScript( template._datas.path, iblSet._datas.path, self._overrideKeys )
		if loaderScript.content and loaderScript.write() :
			messageBox.messageBox( "Information", "Information", "{0} | '{1}' Output Done !".format( self.__class__.__name__, template._datas.outputScript ) )
			return True
		else :
			raise Exception, "{0} | '{1}' Output Failed !".format( self.__class__.__name__, template._datas.outputScript )
Example 11
def buildTocTree(title, input, output, contentDirectory):
    """
	Builds Sphinx documentation table of content tree file.

	:param title: Package title.
	:type title: unicode
	:param input: Input file to convert.
	:type input: unicode
	:param output: Output file.
	:type output: unicode
	:param contentDirectory: Directory containing the content to be included in the table of content.
	:type contentDirectory: unicode
	:return: Definition success.
	:rtype: bool
	"""

    LOGGER.info("{0} | Building Sphinx documentation index '{1}' file!".format(
        buildTocTree.__name__, output))
    file = File(input)
    file.cache()

    existingFiles = [
        foundations.strings.getSplitextBasename(item) for item in glob.glob(
            "{0}/*{1}".format(contentDirectory, FILES_EXTENSION))
    ]
    relativeDirectory = contentDirectory.replace(
        "{0}/".format(os.path.dirname(output)), "")

    tocTree = ["\n"]
    for line in file.content:
        search = re.search(r"`([a-zA-Z_ ]+)`_", line)
        if not search:
            continue

        item = search.groups()[0]
        code = "{0}{1}".format(item[0].lower(), item.replace(" ", "")[1:])
        if code in existingFiles:
            link = "{0}/{1}".format(relativeDirectory, code)
            data = "{0}{1}{2} <{3}>\n".format(" ", " " * line.index("-"), item,
                                              link)
            LOGGER.info("{0} | Adding '{1}' entry to Toc Tree!".format(
                buildTocTree.__name__, data.replace("\n", "")))
            tocTree.append(data)
    tocTree.append("\n")

    TOCTREE_TEMPLATE_BEGIN[0] = TOCTREE_TEMPLATE_BEGIN[0].format(title)
    TOCTREE_TEMPLATE_BEGIN[1] = TOCTREE_TEMPLATE_BEGIN[1].format(
        "=" * len(TOCTREE_TEMPLATE_BEGIN[0]))
    content = TOCTREE_TEMPLATE_BEGIN
    content.extend(tocTree)
    content.extend(TOCTREE_TEMPLATE_END)

    file = File(output)
    file.content = content
    file.write()

    return True
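
buildTocTree() maps each `Section Title`_ reference found in the input file to a content file named with the lower-camel-case form of the title. The snippet below isolates that name mapping on an arbitrary sample line:

import re

line = "-  `Api Usage`_"
match = re.search(r"`([a-zA-Z_ ]+)`_", line)
item = match.groups()[0]                                        # "Api Usage"
code = "{0}{1}".format(item[0].lower(), item.replace(" ", "")[1:])
print(code)                                                     # "apiUsage"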
Example 12
	def testWrite(self):
		"""
		This method tests :meth:`foundations.io.File.write` method.
		"""

		ioFile = File(tempfile.mkstemp()[1])
		self.assertIsInstance(ioFile.content, list)
		ioFile.content = FILE_CONTENT
		writeSuccess = ioFile.write()
		self.assertTrue(writeSuccess)
		ioFile.read()
		self.assertListEqual(ioFile.content, FILE_CONTENT)
		os.remove(ioFile.file)
Example 13
def sliceReStructuredText(input, output):
	"""
	Slices given reStructuredText file.

	:param input: ReStructuredText file to slice.
	:type input: unicode
	:param output: Directory to output sliced reStructuredText files.
	:type output: unicode
	:return: Definition success.
	:rtype: bool
	"""

	LOGGER.info("{0} | Slicing '{1}' file!".format(sliceReStructuredText.__name__, input))
	file = File(input)
	file.cache()

	slices = OrderedDict()
	for i, line in enumerate(file.content):
		search = re.search(r"^\.\. \.(\w+)", line)
		if search:
			slices[search.groups()[0]] = i + SLICE_ATTRIBUTE_INDENT

	index = 0
	for slice, sliceStart in slices.iteritems():
		sliceFile = File(os.path.join(output, "{0}.{1}".format(slice, OUTPUT_FILES_EXTENSION)))
		LOGGER.info("{0} | Outputing '{1}' file!".format(sliceReStructuredText.__name__, sliceFile.path))
		sliceEnd = index < (len(slices.values()) - 1) and slices.values()[index + 1] - SLICE_ATTRIBUTE_INDENT or \
				   len(file.content)

		for i in range(sliceStart, sliceEnd):
			skipLine = False
			for item in CONTENT_DELETION:
				if re.search(item, file.content[i]):
					LOGGER.info("{0} | Skipping Line '{1}' with '{2}' content!".format(sliceReStructuredText.__name__,
																					   i,
																					   item))
					skipLine = True
					break

			if skipLine:
				continue

			line = file.content[i]
			for pattern, value in STATEMENT_SUBSTITUTE.iteritems():
				line = re.sub(pattern, value, line)

			search = re.search(r"-  `[\w ]+`_ \(([\w\.]+)\)", line)
			if search:
				LOGGER.info("{0} | Updating Line '{1}' link: '{2}'!".format(sliceReStructuredText.__name__,
																			i,
																			search.groups()[0]))
				line = "-  :ref:`{0}`\n".format(search.groups()[0])
			sliceFile.content.append(line)

		sliceFile.write()
		index += 1

	return True
def getDependenciesInformations():
	"""
	This definition gets sIBL_GUI dependencies informations file.
	"""

	content = ["[Dependencies]\n"]
	for dependency, path in DEPENDENCIES.iteritems():
		release = subprocess.Popen("cd {0} && {1} describe".format(path, GIT_EXECUTABLE),
									shell=True,
									stdout=subprocess.PIPE,
									stderr=subprocess.PIPE).communicate()[0]
		LOGGER.info("{0} | '{1}': '{2}'.".format(getDependenciesInformations.__name__, dependency, release.strip()))
		content.append("{0}={1}".format(dependency, release))
	file = File(DEPENDENCIES_FILE)
	file.content = content
	file.write()
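
In sliceReStructuredText() each ".. .name" marker records where a slice starts; the end of a slice is either the next slice's start minus SLICE_ATTRIBUTE_INDENT, or the end of the file, computed above with the Python 2 "and/or" idiom. The sketch below spells that boundary computation out with made-up marker names and positions:

from collections import OrderedDict

SLICE_ATTRIBUTE_INDENT = 2
slices = OrderedDict([("Introduction", 4), ("Usage", 12), ("Api", 25)])   # name -> start line
content_length = 40                                                       # stands in for len(file.content)

starts = list(slices.values())
for index, (name, start) in enumerate(slices.items()):
    if index < len(starts) - 1:
        end = starts[index + 1] - SLICE_ATTRIBUTE_INDENT
    else:
        end = content_length
    print("{0}: lines {1} to {2}".format(name, start, end))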
Example 15
def sliceDocumentation(fileIn, outputDirectory):
    """
	This Definition slices given documentation file.

	:param fileIn: File to convert. ( String )
	:param outputDirectory: Output directory. ( String )
	"""

    LOGGER.info("{0} | Slicing '{1}' file!".format(sliceDocumentation.__name__,
                                                   fileIn))
    file = File(fileIn)
    file.cache()

    slices = OrderedDict()
    for i, line in enumerate(file.content):
        search = re.search(r"^\.\. \.(\w+)", line)
        if search:
            slices[search.groups()[0]] = i + SLICE_ATTRIBUTE_INDENT

    index = 0
    for slice, sliceStart in slices.iteritems():
        sliceFile = File(
            os.path.join(outputDirectory,
                         "{0}.{1}".format(slice, OUTPUT_FILES_EXTENSION)))
        LOGGER.info("{0} | Outputing '{1}' file!".format(
            sliceDocumentation.__name__, sliceFile.path))
        sliceEnd = index < (len(slices.values()) - 1) and slices.values()[index + 1] - SLICE_ATTRIBUTE_INDENT or \
        len(file.content)

        for i in range(sliceStart, sliceEnd):
            skipLine = False
            for item in CONTENT_DELETION:
                if re.search(item, file.content[i]):
                    LOGGER.info(
                        "{0} | Skipping Line '{1}' with '{2}' content!".format(
                            sliceDocumentation.__name__, i, item))
                    skipLine = True
                    break

            if skipLine:
                continue

            line = file.content[i]
            for pattern, value in CONTENT_SUBSTITUTIONS.iteritems():
                line = re.sub(pattern, value, line)

            search = re.search(r"-  `[\w ]+`_ \(([\w\.]+)\)", line)
            if search:
                LOGGER.info("{0} | Updating Line '{1}' link: '{2}'!".format(
                    sliceDocumentation.__name__, i,
                    search.groups()[0]))
                line = "-  :ref:`{0}`\n".format(search.groups()[0])
            sliceFile.content.append(line)

        sliceFile.write()
        index += 1
def getSphinxDocumentationTocTree(title, fileIn, fileOut, contentDirectory):
    """
	This definition gets Sphinx documentation index file.

	:param title: Package title. ( String )
	:param fileIn: File to convert. ( String )
	:param fileOut: Output file. ( String )
	:param contentDirectory: Content directory. ( String )
	"""

    LOGGER.info("{0} | Building Sphinx documentation index '{1}' file!".format(
        getSphinxDocumentationTocTree.__name__, fileOut))
    file = File(fileIn)
    file.cache()

    existingFiles = [
        foundations.strings.getSplitextBasename(item) for item in glob.glob(
            "{0}/*{1}".format(contentDirectory, FILES_EXTENSION))
    ]
    relativeDirectory = contentDirectory.replace(
        "{0}/".format(os.path.dirname(fileOut)), "")

    tocTree = ["\n"]
    for line in file.content:
        search = re.search(r"`([a-zA-Z_ ]+)`_", line)
        if not search:
            continue

        item = search.groups()[0]
        code = "{0}{1}".format(item[0].lower(), item.replace(" ", "")[1:])
        if code in existingFiles:
            link = "{0}/{1}".format(relativeDirectory, code)
            data = "{0}{1}{2} <{3}>\n".format(" ", " " * line.index("-"), item,
                                              link)
            LOGGER.info("{0} | Adding '{1}' entry to Toc Tree!".format(
                getSphinxDocumentationTocTree.__name__, data.replace("\n",
                                                                     "")))
            tocTree.append(data)
    tocTree.append("\n")

    TOCTREE_TEMPLATE_BEGIN[0] = TOCTREE_TEMPLATE_BEGIN[0].format(title)
    TOCTREE_TEMPLATE_BEGIN[1] = TOCTREE_TEMPLATE_BEGIN[1].format(
        "=" * len(TOCTREE_TEMPLATE_BEGIN[0]))
    content = TOCTREE_TEMPLATE_BEGIN
    content.extend(tocTree)
    content.extend(TOCTREE_TEMPLATE_END)

    file = File(fileOut)
    file.content = content
    file.write()
def getHDRLabsDocumentation(fileIn, fileOut):
	"""
	This definition extracts sIBL_GUI Documentation body for HDRLabs.com.

	:param fileIn: File to convert. ( String )
	:param fileOut: Output file. ( String )
	"""

	LOGGER.info("{0} | Extracting 'body' tag content from {1}' file!".format(getHDRLabsDocumentation.__name__, fileIn))
	file = File(fileIn)
	file.cache()

	LOGGER.info("{0} | Processing 'body' data!".format(getHDRLabsDocumentation.__name__))
	content = []
	skipLine = True
	for line in file.content:
		if re.search(r"<body>", line):
			skipLine = False
		elif re.search(r"</body>", line):
			skipLine = True

		not skipLine and content.append("{0}\n".format(line.replace("\t", "", 2)))

	file = File(fileOut)
	file.content = content
	file.write()
Example 18
def getHDRLabsDocumentation(input, output):
	"""
	Extracts sIBL_GUI Documentation body for HDRLabs.com.

	:param input: Input file to extract documentation body.
	:type input: unicode
	:param output: Output html file.
	:type output: unicode
	:return: Definition success.
	:rtype: bool
	"""

	LOGGER.info("{0} | Extracting 'body' tag content from {1}' file!".format(getHDRLabsDocumentation.__name__, input))
	file = File(input)
	file.cache()

	LOGGER.info("{0} | Processing 'body' data!".format(getHDRLabsDocumentation.__name__))
	content = []
	skipLine = True
	for line in file.content:
		if re.search(r"<body>", line):
			skipLine = False
		elif re.search(r"</body>", line):
			skipLine = True

		not skipLine and content.append("{0}\n".format(line.replace("\t", "", 2)))

	file = File(output)
	file.content = content
	file.write()

	return True
Example 19
def reStructuredTextToHtml(input, output, cssFile):
    """
	Outputs a reStructuredText file to html.

	:param input: Input reStructuredText file to convert.
	:type input: unicode
	:param output: Output html file.
	:type output: unicode
	:param cssFile: Css file.
	:type cssFile: unicode
	:return: Definition success.
	:rtype: bool
	"""

    LOGGER.info("{0} | Converting '{1}' reStructuredText file to html!".format(
        reStructuredTextToHtml.__name__, input))
    os.system("{0} --stylesheet-path='{1}' '{2}' > '{3}'".format(
        RST2HTML, os.path.join(os.path.dirname(__file__), cssFile), input,
        output))

    LOGGER.info("{0} | Formatting html file!".format("Tidy"))
    os.system("tidy -config {0} -m '{1}'".format(
        os.path.join(os.path.dirname(__file__), TIDY_SETTINGS_FILE), output))

    file = File(output)
    file.cache()
    LOGGER.info("{0} | Replacing spaces with tabs!".format(
        reStructuredTextToHtml.__name__))
    file.content = [line.replace(" " * 4, "\t") for line in file.content]
    file.write()

    return True
def getDependenciesInformations():
    """
	This definition gets sIBL_GUI dependencies informations file.
	"""

    content = ["[Dependencies]\n"]
    for dependency, path in DEPENDENCIES.iteritems():
        release = subprocess.Popen("cd {0} && {1} describe".format(
            path, GIT_EXECUTABLE),
                                   shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE).communicate()[0]
        LOGGER.info("{0} | '{1}': '{2}'.".format(
            getDependenciesInformations.__name__, dependency, release.strip()))
        content.append("{0}={1}".format(dependency, release))
    file = File(DEPENDENCIES_FILE)
    file.content = content
    file.write()
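
Both definitions above shell out with os.system: one pipes rst2html output into the target file and runs tidy on it, the other captures "git describe" with subprocess.Popen. A hedged equivalent of the conversion step using subprocess without an intermediate shell; RST2HTML here is a placeholder for the constant used above, and the paths are parameters:

import subprocess

RST2HTML = "rst2html.py"      # placeholder for the RST2HTML constant above

def rst_to_html(input_path, output_path, css_path, tidy_config):
    # Convert reStructuredText to html, redirecting stdout to the output file.
    with open(output_path, "w") as handle:
        subprocess.call([RST2HTML, "--stylesheet-path={0}".format(css_path), input_path],
                        stdout=handle)
    # Reformat the result in place with tidy, as in the definitions above.
    subprocess.call(["tidy", "-config", tidy_config, "-m", output_path])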
Example 21
def bleach(file):
	"""
	Sanitizes given python module.

	:param file: Python module file.
	:type file: unicode
	:return: Definition success.
	:rtype: bool
	"""

	LOGGER.info("{0} | Sanitizing '{1}' python module!".format(__name__, file))

	sourceFile = File(file)
	content = sourceFile.read()
	for pattern in STATEMENT_SUBSTITUTE:
		matches = [match for match in re.finditer(pattern, content, re.DOTALL)]

		offset = 0
		for match in matches:
			start, end = match.start("bleach"), match.end("bleach")
			substitution = "{0}{1}".format(STATEMENT_UPDATE_MESSAGE,
										   re.sub("\n", "\n{0}".format(STATEMENT_UPDATE_MESSAGE),
												  match.group("bleach")))
			content = "".join((content[0: start + offset],
							   substitution,
							   content[end + offset:]))
			offset += len(substitution) - len(match.group("bleach"))

	sourceFile.content = [content]
	sourceFile.write()

	return True
Example 22
	def outputLoaderScript(self, template, iblSet):
		"""
		This method outputs the Loader Script.

		:param template: Template. ( Template )
		:param iblSet: Ibl Set. ( IblSet )
		:return: Loader Script file. ( String )
		"""

		self.__overrideKeys = self.getDefaultOverrideKeys()

		for component in self.__engine.componentsManager.listComponents():
			profile = self.__engine.componentsManager.components[component]
			interface = self.__engine.componentsManager.getInterface(component)
			if interface.activated and profile.name != self.name:
				hasattr(interface, "getOverrideKeys") and interface.getOverrideKeys()

		if self.__engine.parameters.loaderScriptsOutputDirectory:
			if foundations.common.pathExists(self.__engine.parameters.loaderScriptsOutputDirectory):
				loaderScript = File(os.path.join(self.__engine.parameters.loaderScriptsOutputDirectory, template.outputScript))
			else:
				raise foundations.exceptions.DirectoryExistsError(
				"{0} | '{1}' loader Script output directory doesn't exists!".format(
				self.__class__.__name__, self.__engine.parameters.loaderScriptsOutputDirectory))
		else:
			loaderScript = File(os.path.join(self.__ioDirectory, template.outputScript))

		LOGGER.debug("> Loader Script output file path: '{0}'.".format(loaderScript.path))

		loaderScript.content = self.getLoaderScript(template.path, iblSet.path, self.__overrideKeys)

		if loaderScript.content and loaderScript.write():
			return loaderScript.path
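
The 'hasattr(interface, "getOverrideKeys") and interface.getOverrideKeys()' line is a guarded call written with the boolean "and" idiom used throughout this codebase; the explicit form below does the same thing. The dummy interface class is purely illustrative:

class _Interface(object):
    """Dummy stand-in for a component interface exposing getOverrideKeys()."""

    def getOverrideKeys(self):
        return {}

interface = _Interface()
if hasattr(interface, "getOverrideKeys"):
    interface.getOverrideKeys()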
Example 23
    def testClear(self):
        """
		Tests :meth:`foundations.io.File.clear` method.
		"""

        fileDescriptor, path = tempfile.mkstemp()
        ioFile = File(unicode(path))
        self.assertIsInstance(ioFile.content, list)
        ioFile.content = FILE_CONTENT
        ioFile.write()
        self.assertTrue(ioFile.clear())
        ioFile.cache()
        self.assertListEqual(ioFile.content, [])
        os.close(fileDescriptor)
def reStructuredTextToHtml(input, output, cssFile):
	"""
	Outputs a reStructuredText file to html.

	:param input: Input reStructuredText file to convert.
	:type input: unicode
	:param output: Output html file.
	:type output: unicode
	:param cssFile: Css file.
	:type cssFile: unicode
	:return: Definition success.
	:rtype: bool
	"""

	LOGGER.info("{0} | Converting '{1}' reStructuredText file to html!".format(reStructuredTextToHtml.__name__, input))
	os.system("{0} --stylesheet-path='{1}' '{2}' > '{3}'".format(RST2HTML,
																 os.path.join(os.path.dirname(__file__), cssFile),
																 input,
																 output))

	LOGGER.info("{0} | Formatting html file!".format("Tidy"))
	os.system("tidy -config {0} -m '{1}'".format(os.path.join(os.path.dirname(__file__), TIDY_SETTINGS_FILE), output))

	file = File(output)
	file.cache()
	LOGGER.info("{0} | Replacing spaces with tabs!".format(reStructuredTextToHtml.__name__))
	file.content = [line.replace(" " * 4, "\t") for line in file.content]
	file.write()

	return True
Example 25
    def testAppend(self):
        """
		This method tests :meth:`foundations.io.File.append` method.
		"""

        fileDescriptor, path = tempfile.mkstemp()
        ioFile = File(path)
        self.assertIsInstance(ioFile.content, list)
        ioFile.content = FILE_CONTENT
        ioFile.write()
        append = ioFile.append()
        self.assertTrue(append)
        ioFile.cache()
        self.assertListEqual(ioFile.content, FILE_CONTENT + FILE_CONTENT)
        os.close(fileDescriptor)
Example 26
	def saveFiles(self, nodes):
		"""
		Saves user defined files using give nodes.

		:param nodes: Nodes.
		:type nodes: list
		:return: Method success.
		:rtype: bool
		"""

		metrics = {"Opened" : 0, "Cached" : 0}
		for node in nodes:
			file = node.file
			if self.__container.getEditor(file):
				if self.__container.saveFile(file):
					metrics["Opened"] += 1
					self.__uncache(file)
			else:
				cacheData = self.__filesCache.getContent(file)
				if cacheData is None:
					LOGGER.warning(
					"!> {0} | '{1}' file doesn't exists in files cache!".format(self.__class__.__name__, file))
					continue

				if cacheData.document:
					fileHandle = File(file)
					fileHandle.content = [cacheData.document.toPlainText().toUtf8()]
					if fileHandle.write():
						metrics["Cached"] += 1
						self.__uncache(file)
				else:
					LOGGER.warning(
					"!> {0} | '{1}' file document doesn't exists in files cache!".format(self.__class__.__name__, file))

		self.__container.engine.notificationsManager.notify(
		"{0} | '{1}' opened file(s) and '{2}' cached file(s) saved!".format(self.__class__.__name__,
																		metrics["Opened"],
																		metrics["Cached"]))
Example 27
def buildTocTree(title, input, output, contentDirectory):
	"""
	Builds Sphinx documentation table of content tree file.

	:param title: Package title.
	:type title: unicode
	:param input: Input file to convert.
	:type input: unicode
	:param output: Output file.
	:type output: unicode
	:param contentDirectory: Directory containing the content to be included in the table of content.
	:type contentDirectory: unicode
	:return: Definition success.
	:rtype: bool
	"""

	LOGGER.info("{0} | Building Sphinx documentation index '{1}' file!".format(buildTocTree.__name__,
																			   output))
	file = File(input)
	file.cache()

	existingFiles = [foundations.strings.getSplitextBasename(item)
					 for item in glob.glob("{0}/*{1}".format(contentDirectory, FILES_EXTENSION))]
	relativeDirectory = contentDirectory.replace("{0}/".format(os.path.dirname(output)), "")

	tocTree = ["\n"]
	for line in file.content:
		search = re.search(r"`([a-zA-Z_ ]+)`_", line)
		if not search:
			continue

		item = search.groups()[0]
		code = "{0}{1}".format(item[0].lower(), item.replace(" ", "")[1:])
		if code in existingFiles:
			link = "{0}/{1}".format(relativeDirectory, code)
			data = "{0}{1}{2} <{3}>\n".format(" ", " " * line.index("-"), item, link)
			LOGGER.info("{0} | Adding '{1}' entry to Toc Tree!".format(buildTocTree.__name__,
																	   data.replace("\n", "")))
			tocTree.append(data)
	tocTree.append("\n")

	TOCTREE_TEMPLATE_BEGIN[0] = TOCTREE_TEMPLATE_BEGIN[0].format(title)
	TOCTREE_TEMPLATE_BEGIN[1] = TOCTREE_TEMPLATE_BEGIN[1].format("=" * len(TOCTREE_TEMPLATE_BEGIN[0]))
	content = TOCTREE_TEMPLATE_BEGIN
	content.extend(tocTree)
	content.extend(TOCTREE_TEMPLATE_END)

	file = File(output)
	file.content = content
	file.write()

	return True
Example 28
	def applyPatch(self, patch):
		"""
		Applies given patch.

		:param patch: Patch.
		:type patch: Patch
		:return: Method success.
		:rtype: bool
		"""

		historyFile = File(self.__historyFile)
		patchesHistory = historyFile.cache() and [line.strip() for line in historyFile.content] or []

		if patch.uid not in patchesHistory:
			LOGGER.debug("> Applying '{0}' patch!".format(patch.name))
			if patch.apply():
				historyFile.content = ["{0}\n".format(patch.uid)]
				historyFile.append()
			else:
				raise umbra.exceptions.PatchApplyError("{0} | '{1}' patch failed to apply!".format(
					self.__class__.__name__, patch.path))
		else:
			LOGGER.debug("> '{0}' patch is already applied!".format(patch.name))
		return True
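
applyPatch() keeps an append-only history file of applied patch uids so a patch is only ever applied once. A self-contained sketch of the same bookkeeping; the patch object, its uid/apply() attributes and the history path are hypothetical stand-ins:

def apply_once(patch, history_path="patches.history"):
    # Load previously applied uids, tolerating a missing history file.
    try:
        with open(history_path) as handle:
            applied = set(line.strip() for line in handle)
    except IOError:
        applied = set()

    if patch.uid in applied:
        return True                      # already applied, nothing to do
    if not patch.apply():
        raise RuntimeError("'{0}' patch failed to apply!".format(patch.name))
    with open(history_path, "a") as handle:
        handle.write("{0}\n".format(patch.uid))
    return True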
Example 29
    def testUncache(self):
        """
		Tests :meth:`foundations.io.File.uncache` method.
		"""

        ioFile = File(TEXT_FILE)
        ioFile.cache()
        self.assertListEqual(ioFile.content, FILE_CONTENT)
        ioFile.uncache()
        self.assertListEqual(ioFile.content, [])
Example 30
	def testAppend(self):
		"""
		This method tests :meth:`foundations.io.File.append` method.
		"""

		ioFile = File(tempfile.mkstemp()[1])
		self.assertIsInstance(ioFile.content, list)
		ioFile.content = FILE_CONTENT
		ioFile.write()
		append = ioFile.append()
		self.assertTrue(append)
		ioFile.read()
		self.assertListEqual(ioFile.content, FILE_CONTENT + FILE_CONTENT)
		os.remove(ioFile.file)
Example 31
def textileToHtml( fileIn, fileOut, title ):
	'''
	This Definition Outputs A Textile File To HTML.
		
	@param fileIn: File To Convert. ( String )
	@param fileOut: Output File. ( String )
	@param title: HTML File Title. ( String )
	'''

	LOGGER.info( "{0} | Converting '{1}' Textile File To HTML !".format( textileToHtml.__name__, fileIn ) )
	file = File( fileIn )
	file.read()

	output = []
	output.append( "<html>\n\t<head>\n" )
	output.append( "\t\t<title>{0}</title>\n".format( title ) )
	output.append( 
			"""\t\t<style type="text/css">
	            body {
	                text-align: justify;
	                font-size: 10pt;
	                margin: 10px 10px 10px 10px;
	                background-color: rgb(192, 192, 192);
	                color: rgb(50, 50, 50);
	            }
	            A:link {
	                text-decoration: none;
	                color: rgb(50, 85, 125);
	            }
	            A:visited {
	                text-decoration: none;
	                color: rgb(50, 85, 125);
	            }
	            A:active {
	                text-decoration: none;
	                color: rgb(50, 85, 125);
	            }
	            A:hover {
	                text-decoration: underline;
	                color: rgb(50, 85, 125);
	            }
	        </style>\n""" )
	output.append( "\t</head>\n\t<body>\n\t" )
	output.append( "\n\t".join( [line for line in textile.textile( "".join( file.content ) ).split( "\n" ) if line] ) )
	output.append( "\t\t</span>\n" )
	output.append( "\t</body>\n</html>" )

	file = File( fileOut )
	file.content = output
	file.write()
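
textileToHtml() wraps the output of the textile converter in a hand-built HTML page. Assuming the "textile" import above refers to the python-textile package, the conversion itself is a single call:

import textile

# Assumes the python-textile package; prints an HTML fragment.
print(textile.textile("h1. Hello\n\nSome *emphasised* text."))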
Example 32
def sliceDocumentation(fileIn, outputDirectory):
	"""
	This Definition slices given documentation file.

	:param fileIn: File to convert. ( String )
	:param outputDirectory: Output directory. ( String )
	"""

	LOGGER.info("{0} | Slicing '{1}' file!".format(sliceDocumentation.__name__, fileIn))
	file = File(fileIn)
	file.cache()

	slices = OrderedDict()
	for i, line in enumerate(file.content):
		search = re.search(r"^\.\. \.(\w+)", line)
		if search:
			slices[search.groups()[0]] = i + SLICE_ATTRIBUTE_INDENT

	index = 0
	for slice, sliceStart in slices.iteritems():
		sliceFile = File(os.path.join(outputDirectory, "{0}.{1}".format(slice, OUTPUT_FILES_EXTENSION)))
		LOGGER.info("{0} | Outputing '{1}' file!".format(sliceDocumentation.__name__, sliceFile.path))
		sliceEnd = index < (len(slices.values()) - 1) and slices.values()[index + 1] - SLICE_ATTRIBUTE_INDENT or \
		len(file.content)

		for i in range(sliceStart, sliceEnd):
			skipLine = False
			for item in CONTENT_DELETION:
				if re.search(item, file.content[i]):
					LOGGER.info("{0} | Skipping Line '{1}' with '{2}' content!".format(sliceDocumentation.__name__,
																						i,
																						item))
					skipLine = True
					break

			if skipLine:
				continue

			line = file.content[i]
			for pattern, value in CONTENT_SUBSTITUTIONS.iteritems():
				line = re.sub(pattern, value, line)

			search = re.search(r"-  `[\w ]+`_ \(([\w\.]+)\)", line)
			if search:
				LOGGER.info("{0} | Updating Line '{1}' link: '{2}'!".format(sliceDocumentation.__name__,
																			i,
																			search.groups()[0]))
				line = "-  :ref:`{0}`\n".format(search.groups()[0])
			sliceFile.content.append(line)

		sliceFile.write()
		index += 1
Example 33
    def testWrite(self):
        """
		This method tests :meth:`foundations.io.File.write` method.
		"""

        fileDescriptor, path = tempfile.mkstemp()
        ioFile = File(path)
        self.assertIsInstance(ioFile.content, list)
        ioFile.content = FILE_CONTENT
        writeSuccess = ioFile.write()
        self.assertTrue(writeSuccess)
        ioFile.cache()
        self.assertListEqual(ioFile.content, FILE_CONTENT)
        os.close(fileDescriptor)
def getSphinxDocumentationTocTree(title, fileIn, fileOut, contentDirectory):
	"""
	This definition gets Sphinx documentation index file.

	:param title: Package title. ( String )
	:param fileIn: File to convert. ( String )
	:param fileOut: Output file. ( String )
	:param contentDirectory: Content directory. ( String )
	"""

	LOGGER.info("{0} | Building Sphinx documentation index '{1}' file!".format(getSphinxDocumentationTocTree.__name__,
																				fileOut))
	file = File(fileIn)
	file.cache()

	existingFiles = [foundations.strings.getSplitextBasename(item)
					for item in glob.glob("{0}/*{1}".format(contentDirectory, FILES_EXTENSION))]
	relativeDirectory = contentDirectory.replace("{0}/".format(os.path.dirname(fileOut)), "")

	tocTree = ["\n"]
	for line in file.content:
		search = re.search(r"`([a-zA-Z_ ]+)`_", line)
		if not search:
			continue

		item = search.groups()[0]
		code = "{0}{1}".format(item[0].lower(), item.replace(" ", "")[1:])
		if code in existingFiles:
			link = "{0}/{1}".format(relativeDirectory, code)
			data = "{0}{1}{2} <{3}>\n".format(" ", " " * line.index("-"), item, link)
			LOGGER.info("{0} | Adding '{1}' entry to Toc Tree!".format(getSphinxDocumentationTocTree.__name__,
																		data.replace("\n", "")))
			tocTree.append(data)
	tocTree.append("\n")

	TOCTREE_TEMPLATE_BEGIN[0] = TOCTREE_TEMPLATE_BEGIN[0].format(title)
	TOCTREE_TEMPLATE_BEGIN[1] = TOCTREE_TEMPLATE_BEGIN[1].format("=" * len(TOCTREE_TEMPLATE_BEGIN[0]))
	content = TOCTREE_TEMPLATE_BEGIN
	content.extend(tocTree)
	content.extend(TOCTREE_TEMPLATE_END)

	file = File(fileOut)
	file.content = content
	file.write()
Example 35
def reStructuredTextToHtml(fileIn, fileOut):
    """
	This definition outputs a reStructuredText file to html.

	:param fileIn: File to convert. ( String )
	:param fileOut: Output file. ( String )
	"""

    LOGGER.info("{0} | Converting '{1}' reStructuredText file to html!".format(
        reStructuredTextToHtml.__name__, fileIn))
    os.system("{0} --stylesheet-path='{1}' '{2}' > '{3}'".format(
        RST2HTML, os.path.join(os.path.dirname(__file__), CSS_FILE), fileIn,
        fileOut))

    LOGGER.info("{0} | Formatting html file!".format("Tidy"))
    os.system("tidy -config {0} -m '{1}'".format(
        os.path.join(os.path.dirname(__file__), TIDY_SETTINGS_FILE), fileOut))

    file = File(fileOut)
    file.cache()
    LOGGER.info("{0} | Replacing spaces with tabs!".format(
        reStructuredTextToHtml.__name__))
    file.content = [line.replace(" " * 4, "\t") for line in file.content]
    file.write()
def reStructuredTextToHtml(fileIn, fileOut):
	"""
	This definition outputs a reStructuredText file to html.

	:param fileIn: File to convert. ( String )
	:param fileOut: Output file. ( String )
	"""

	LOGGER.info("{0} | Converting '{1}' reStructuredText file to html!".format(reStructuredTextToHtml.__name__, fileIn))
	os.system("{0} --stylesheet-path='{1}' '{2}' > '{3}'".format(RST2HTML,
																os.path.join(os.path.dirname(__file__), CSS_FILE),
																fileIn,
																fileOut))

	LOGGER.info("{0} | Formatting html file!".format("Tidy"))
	os.system("tidy -config {0} -m '{1}'".format(os.path.join(os.path.dirname(__file__), TIDY_SETTINGS_FILE), fileOut))

	file = File(fileOut)
	file.read()
	LOGGER.info("{0} | Replacing spaces with tabs!".format(reStructuredTextToHtml.__name__))
	file.content = [line.replace(" " * 4, "\t") for line in file.content]
	file.write()
def getSphinxDocumentationApi(packages, cloneDirectory, outputDirectory, apiFile):
	"""
	This definition gets Sphinx documentation API.

	:param packages: Packages. ( String )
	:param cloneDirectory: Source clone directory. ( String )
	:param outputDirectory: Content directory. ( String )
	:param apiFile: API file. ( String )
	"""

	LOGGER.info("{0} | Building Sphinx documentation API!".format(getSphinxDocumentationApi.__name__))

	if os.path.exists(cloneDirectory):
		shutil.rmtree(cloneDirectory)
		os.makedirs(cloneDirectory)

	packagesModules = {"apiModules" : [],
					"testsModules" : []}
	for package in packages.split(","):
		package = __import__(package)
		path = foundations.common.getFirstItem(package.__path__)
		sourceDirectory = os.path.dirname(path)

		for file in sorted(list(foundations.walkers.filesWalker(sourceDirectory, filtersIn=("{0}.*\.ui$".format(path),)))):
			LOGGER.info("{0} | Ui file: '{1}'".format(getSphinxDocumentationApi.__name__, file))
			targetDirectory = os.path.dirname(file).replace(sourceDirectory, "")
			directory = "{0}{1}".format(cloneDirectory, targetDirectory)
			if not foundations.common.pathExists(directory):
				os.makedirs(directory)
			source = os.path.join(directory, os.path.basename(file))
			shutil.copyfile(file, source)

		modules = []
		for file in sorted(list(foundations.walkers.filesWalker(sourceDirectory, filtersIn=("{0}.*\.py$".format(path),),
		filtersOut=EXCLUDED_PYTHON_MODULES))):
			LOGGER.info("{0} | Python file: '{1}'".format(getSphinxDocumentationApi.__name__, file))
			module = "{0}.{1}" .format((".".join(os.path.dirname(file).replace(sourceDirectory, "").split("/"))),
												foundations.strings.getSplitextBasename(file)).strip(".")
			LOGGER.info("{0} | Module name: '{1}'".format(getSphinxDocumentationApi.__name__, module))
			directory = os.path.dirname(os.path.join(cloneDirectory, module.replace(".", "/")))
			if not foundations.common.pathExists(directory):
				os.makedirs(directory)
			source = os.path.join(directory, os.path.basename(file))
			shutil.copyfile(file, source)

			sourceFile = File(source)
			sourceFile.cache()
			trimFromIndex = trimEndIndex = None
			inMultilineString = inDecorator = False
			for i, line in enumerate(sourceFile.content):
				if re.search(r"__name__ +\=\= +\"__main__\"", line):
					trimFromIndex = i
				for pattern, value in CONTENT_SUBSTITUTIONS.iteritems():
					if re.search(pattern, line):
						sourceFile.content[i] = re.sub(pattern, value, line)

				strippedLine = line.strip()
				if re.search(r"^\"\"\"", strippedLine):
					inMultilineString = not inMultilineString

				if inMultilineString:
					continue

				if re.search(r"^@\w+", strippedLine) and \
				not re.search(r"@property", strippedLine) and \
				not re.search(r"^@\w+\.setter", strippedLine) and \
				not re.search(r"^@\w+\.deleter", strippedLine):
					inDecorator = True
					indent = re.search(r"^([ \t]*)", line)

				if re.search(r"^[ \t]*def \w+", sourceFile.content[i]) or \
					re.search(r"^[ \t]*class \w+", sourceFile.content[i]):
					inDecorator = False

				if not inDecorator:
					continue

				sourceFile.content[i] = "{0}{1} {2}".format(indent.groups()[0], DECORATORS_COMMENT_MESSAGE, line)

			if trimFromIndex:
				LOGGER.info("{0} | Trimming '__main__' statements!".format(getSphinxDocumentationApi.__name__))
				content = [sourceFile.content[i] for i in range(trimFromIndex)]
				content.append("{0}\n".format(STATEMENTS_UPDATE_MESSAGGE))
				sourceFile.content = content
			sourceFile.write()

			if "__init__.py" in file:
				continue

			rstFilePath = "{0}{1}".format(module, FILES_EXTENSION)
			LOGGER.info("{0} | Building API file: '{1}'".format(getSphinxDocumentationApi.__name__, rstFilePath))
			rstFile = File(os.path.join(outputDirectory, rstFilePath))
			header = ["_`{0}`\n".format(module),
					"==={0}\n".format("="*len(module)),
					"\n",
					".. automodule:: {0}\n".format(module),
					"\n"]
			rstFile.content.extend(header)

			functions = OrderedDict()
			classes = OrderedDict()
			moduleAttributes = OrderedDict()
			for member, object in moduleBrowser._readmodule(module, [source, ]).iteritems():
				if object.__class__ == moduleBrowser.Function:
					if not member.startswith("_"):
						functions[member] = [".. autofunction:: {0}\n".format(member)]
				elif object.__class__ == moduleBrowser.Class:
					classes[member] = [".. autoclass:: {0}\n".format(member),
										"	:show-inheritance:\n",
										"	:members:\n"]
				elif object.__class__ == moduleBrowser.Global:
					if not member.startswith("_"):
						moduleAttributes[member] = [".. attribute:: {0}.{1}\n".format(module, member)]

			moduleAttributes and rstFile.content.append("Module Attributes\n-----------------\n\n")
			for moduleAttribute in moduleAttributes.itervalues():
				rstFile.content.extend(moduleAttribute)
				rstFile.content.append("\n")

			functions and rstFile.content.append("Functions\n---------\n\n")
			for function in functions.itervalues():
				rstFile.content.extend(function)
				rstFile.content.append("\n")

			classes and rstFile.content.append("Classes\n-------\n\n")
			for class_ in classes.itervalues():
				rstFile.content.extend(class_)
				rstFile.content.append("\n")

			rstFile.write()
			modules.append(module)

		packagesModules["apiModules"].extend([module for module in modules if not "tests" in module])
		packagesModules["testsModules"].extend([module for module in modules if "tests" in module])

	apiFile = File(apiFile)
	apiFile.content.extend(TOCTREE_TEMPLATE_BEGIN)
	for module in packagesModules["apiModules"]:
		apiFile.content.append("   {0} <{1}>\n".format(module, "api/{0}".format(module)))
	for module in packagesModules["testsModules"]:
		apiFile.content.append("   {0} <{1}>\n".format(module, "api/{0}".format(module)))
	apiFile.content.extend(TOCTREE_TEMPLATE_END)
	apiFile.write()
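
Part of getSphinxDocumentationApi() comments out decorators (except property accessors) so that Sphinx autodoc documents the undecorated callables. The condensed sketch below reproduces that state machine without the multiline-string handling of the original; the comment marker is a placeholder for DECORATORS_COMMENT_MESSAGE:

import re

MARKER = "#**#"      # placeholder for DECORATORS_COMMENT_MESSAGE

def comment_decorators(lines):
    in_decorator, indent, out = False, "", []
    for line in lines:
        stripped = line.strip()
        if re.search(r"^@\w+", stripped) and \
                not re.search(r"@property", stripped) and \
                not re.search(r"^@\w+\.(setter|deleter)", stripped):
            in_decorator = True
            indent = re.search(r"^([ \t]*)", line).groups()[0]
        if re.search(r"^[ \t]*(def|class) \w+", line):
            in_decorator = False
        out.append("{0}{1} {2}".format(indent, MARKER, line) if in_decorator else line)
    return out

print(comment_decorators(["@executionTrace", "def do(x):", "\treturn x"]))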
Example 38
def buildApi(packages, input, output, sanitizer, excludedModules=None):
	"""
	Builds the Sphinx documentation API.

	:param packages: Packages to include in the API.
	:type packages: list
	:param input: Input modules directory.
	:type input: unicode
	:param output: Output reStructuredText files directory.
	:type output: unicode
	:param sanitizer: Sanitizer python module.
	:type sanitizer: unicode
	:param excludedModules: Excluded modules.
	:type excludedModules: list
	:return: Definition success.
	:rtype: bool
	"""

	LOGGER.info("{0} | Building Sphinx documentation API!".format(buildApi.__name__))

	sanitizer = importSanitizer(sanitizer)

	if os.path.exists(input):
		shutil.rmtree(input)
		os.makedirs(input)

	excludedModules = [] if excludedModules is None else excludedModules

	packagesModules = {"apiModules": [],
					   "testsModules": []}
	for package in packages:
		package = __import__(package)
		path = foundations.common.getFirstItem(package.__path__)
		packageDirectory = os.path.dirname(path)

		for file in sorted(
				list(foundations.walkers.filesWalker(packageDirectory, filtersIn=("{0}.*\.ui$".format(path),)))):
			LOGGER.info("{0} | Ui file: '{1}'".format(buildApi.__name__, file))
			targetDirectory = os.path.dirname(file).replace(packageDirectory, "")
			directory = "{0}{1}".format(input, targetDirectory)
			if not foundations.common.pathExists(directory):
				os.makedirs(directory)
			source = os.path.join(directory, os.path.basename(file))
			shutil.copyfile(file, source)

		modules = []
		for file in sorted(
				list(foundations.walkers.filesWalker(packageDirectory, filtersIn=("{0}.*\.py$".format(path),),
													 filtersOut=excludedModules))):
			LOGGER.info("{0} | Python file: '{1}'".format(buildApi.__name__, file))
			module = "{0}.{1}".format((".".join(os.path.dirname(file).replace(packageDirectory, "").split("/"))),
									  foundations.strings.getSplitextBasename(file)).strip(".")
			LOGGER.info("{0} | Module name: '{1}'".format(buildApi.__name__, module))
			directory = os.path.dirname(os.path.join(input, module.replace(".", "/")))
			if not foundations.common.pathExists(directory):
				os.makedirs(directory)
			source = os.path.join(directory, os.path.basename(file))
			shutil.copyfile(file, source)

			sanitizer.bleach(source)

			if "__init__.py" in file:
				continue

			rstFilePath = "{0}{1}".format(module, FILES_EXTENSION)
			LOGGER.info("{0} | Building API file: '{1}'".format(buildApi.__name__, rstFilePath))
			rstFile = File(os.path.join(output, rstFilePath))
			header = ["_`{0}`\n".format(module),
					  "==={0}\n".format("=" * len(module)),
					  "\n",
					  ".. automodule:: {0}\n".format(module),
					  "\n"]
			rstFile.content.extend(header)

			functions = OrderedDict()
			classes = OrderedDict()
			moduleAttributes = OrderedDict()
			for member, object in moduleBrowser._readmodule(module, [source, ]).iteritems():
				if object.__class__ == moduleBrowser.Function:
					if not member.startswith("_"):
						functions[member] = [".. autofunction:: {0}\n".format(member)]
				elif object.__class__ == moduleBrowser.Class:
					classes[member] = [".. autoclass:: {0}\n".format(member),
									   "	:show-inheritance:\n",
									   "	:members:\n"]
				elif object.__class__ == moduleBrowser.Global:
					if not member.startswith("_"):
						moduleAttributes[member] = [".. attribute:: {0}.{1}\n".format(module, member)]

			moduleAttributes and rstFile.content.append("Module Attributes\n-----------------\n\n")
			for moduleAttribute in moduleAttributes.itervalues():
				rstFile.content.extend(moduleAttribute)
				rstFile.content.append("\n")

			functions and rstFile.content.append("Functions\n---------\n\n")
			for function in functions.itervalues():
				rstFile.content.extend(function)
				rstFile.content.append("\n")

			classes and rstFile.content.append("Classes\n-------\n\n")
			for class_ in classes.itervalues():
				rstFile.content.extend(class_)
				rstFile.content.append("\n")

			rstFile.write()
			modules.append(module)

		packagesModules["apiModules"].extend([module for module in modules if not "tests" in module])
		packagesModules["testsModules"].extend([module for module in modules if "tests" in module])

	apiFile = File("{0}{1}".format(output, FILES_EXTENSION))
	apiFile.content.extend(TOCTREE_TEMPLATE_BEGIN)
	for module in packagesModules["apiModules"]:
		apiFile.content.append("   {0} <{1}>\n".format(module, "api/{0}".format(module)))
	for module in packagesModules["testsModules"]:
		apiFile.content.append("   {0} <{1}>\n".format(module, "api/{0}".format(module)))
	apiFile.content.extend(TOCTREE_TEMPLATE_END)
	apiFile.write()

	return True
def sliceReStructuredText(input, output):
    """
	Slices given reStructuredText file.

	:param input: ReStructuredText file to slice.
	:type input: unicode
	:param output: Directory to output sliced reStructuredText files.
	:type output: unicode
	:return: Definition success.
	:rtype: bool
	"""

    LOGGER.info("{0} | Slicing '{1}' file!".format(sliceReStructuredText.__name__, input))
    file = File(input)
    file.cache()

    slices = OrderedDict()
    for i, line in enumerate(file.content):
        search = re.search(r"^\.\. \.(\w+)", line)
        if search:
            slices[search.groups()[0]] = i + SLICE_ATTRIBUTE_INDENT

    index = 0
    for slice, sliceStart in slices.iteritems():
        sliceFile = File(os.path.join(output, "{0}.{1}".format(slice, OUTPUT_FILES_EXTENSION)))
        LOGGER.info("{0} | Outputing '{1}' file!".format(sliceReStructuredText.__name__, sliceFile.path))
        sliceEnd = (
            index < (len(slices.values()) - 1)
            and slices.values()[index + 1] - SLICE_ATTRIBUTE_INDENT
            or len(file.content)
        )

        for i in range(sliceStart, sliceEnd):
            skipLine = False
            for item in CONTENT_DELETION:
                if re.search(item, file.content[i]):
                    LOGGER.info(
                        "{0} | Skipping Line '{1}' with '{2}' content!".format(sliceReStructuredText.__name__, i, item)
                    )
                    skipLine = True
                    break

            if skipLine:
                continue

            line = file.content[i]
            for pattern, value in STATEMENT_SUBSTITUTE.iteritems():
                line = re.sub(pattern, value, line)

            search = re.search(r"-  `[\w ]+`_ \(([\w\.]+)\)", line)
            if search:
                LOGGER.info(
                    "{0} | Updating Line '{1}' link: '{2}'!".format(
                        sliceReStructuredText.__name__, i, search.groups()[0]
                    )
                )
                line = "-  :ref:`{0}`\n".format(search.groups()[0])
            sliceFile.content.append(line)

        sliceFile.write()
        index += 1

    return True
def getSphinxDocumentationApi(packages, cloneDirectory, outputDirectory,
                              apiFile):
    """
	This definition gets Sphinx documentation API.

	:param packages: Packages. ( String )
	:param cloneDirectory: Source clone directory. ( String )
	:param outputDirectory: Content directory. ( String )
	:param apiFile: API file. ( String )
	"""

    LOGGER.info("{0} | Building Sphinx documentation API!".format(
        getSphinxDocumentationApi.__name__))

    if os.path.exists(cloneDirectory):
        shutil.rmtree(cloneDirectory)
        os.makedirs(cloneDirectory)

    packagesModules = {"apiModules": [], "testsModules": []}
    for package in packages.split(","):
        package = __import__(package)
        path = foundations.common.getFirstItem(package.__path__)
        sourceDirectory = os.path.dirname(path)

        for file in sorted(
                list(
                    foundations.walkers.filesWalker(
                        sourceDirectory,
                        filtersIn=("{0}.*\.ui$".format(path), )))):
            LOGGER.info("{0} | Ui file: '{1}'".format(
                getSphinxDocumentationApi.__name__, file))
            targetDirectory = os.path.dirname(file).replace(
                sourceDirectory, "")
            directory = "{0}{1}".format(cloneDirectory, targetDirectory)
            if not foundations.common.pathExists(directory):
                os.makedirs(directory)
            source = os.path.join(directory, os.path.basename(file))
            shutil.copyfile(file, source)

        modules = []
        for file in sorted(
                list(
                    foundations.walkers.filesWalker(
                        sourceDirectory,
                        filtersIn=("{0}.*\.py$".format(path), ),
                        filtersOut=EXCLUDED_PYTHON_MODULES))):
            LOGGER.info("{0} | Python file: '{1}'".format(
                getSphinxDocumentationApi.__name__, file))
            module = "{0}.{1}".format(
                (".".join(
                    os.path.dirname(file).replace(sourceDirectory,
                                                  "").split("/"))),
                foundations.strings.getSplitextBasename(file)).strip(".")
            LOGGER.info("{0} | Module name: '{1}'".format(
                getSphinxDocumentationApi.__name__, module))
            directory = os.path.dirname(
                os.path.join(cloneDirectory, module.replace(".", "/")))
            if not foundations.common.pathExists(directory):
                os.makedirs(directory)
            source = os.path.join(directory, os.path.basename(file))
            shutil.copyfile(file, source)

            sourceFile = File(source)
            sourceFile.cache()
            trimFromIndex = trimEndIndex = None
            inMultilineString = inDecorator = False
            for i, line in enumerate(sourceFile.content):
                if re.search(r"__name__ +\=\= +\"__main__\"", line):
                    trimFromIndex = i
                for pattern, value in CONTENT_SUBSTITUTIONS.iteritems():
                    if re.search(pattern, line):
                        sourceFile.content[i] = re.sub(pattern, value, line)

                strippedLine = line.strip()
                if re.search(r"^\"\"\"", strippedLine):
                    inMultilineString = not inMultilineString

                if inMultilineString:
                    continue

                if re.search(r"^@\w+", strippedLine) and \
                not re.search(r"@property", strippedLine) and \
                not re.search(r"^@\w+\.setter", strippedLine) and \
                not re.search(r"^@\w+\.deleter", strippedLine):
                    inDecorator = True
                    indent = re.search(r"^([ \t]*)", line)

                if re.search(r"^[ \t]*def \w+", sourceFile.content[i]) or \
                 re.search(r"^[ \t]*class \w+", sourceFile.content[i]):
                    inDecorator = False

                if not inDecorator:
                    continue

                sourceFile.content[i] = "{0}{1} {2}".format(
                    indent.groups()[0], DECORATORS_COMMENT_MESSAGE, line)

            if trimFromIndex:
                LOGGER.info("{0} | Trimming '__main__' statements!".format(
                    getSphinxDocumentationApi.__name__))
                content = [sourceFile.content[i] for i in range(trimFromIndex)]
                content.append("{0}\n".format(STATEMENTS_UPDATE_MESSAGGE))
                sourceFile.content = content
            sourceFile.write()

            if "__init__.py" in file:
                continue

            rstFilePath = "{0}{1}".format(module, FILES_EXTENSION)
            LOGGER.info("{0} | Building API file: '{1}'".format(
                getSphinxDocumentationApi.__name__, rstFilePath))
            rstFile = File(os.path.join(outputDirectory, rstFilePath))
            header = [
                "_`{0}`\n".format(module),
                "==={0}\n".format("=" * len(module)), "\n",
                ".. automodule:: {0}\n".format(module), "\n"
            ]
            rstFile.content.extend(header)

            functions = OrderedDict()
            classes = OrderedDict()
            moduleAttributes = OrderedDict()
            for member, object in moduleBrowser._readmodule(
                    module, [
                        source,
                    ]).iteritems():
                if object.__class__ == moduleBrowser.Function:
                    if not member.startswith("_"):
                        functions[member] = [
                            ".. autofunction:: {0}\n".format(member)
                        ]
                elif object.__class__ == moduleBrowser.Class:
                    classes[member] = [
                        ".. autoclass:: {0}\n".format(member),
                        "	:show-inheritance:\n", "	:members:\n"
                    ]
                elif object.__class__ == moduleBrowser.Global:
                    if not member.startswith("_"):
                        moduleAttributes[member] = [
                            ".. attribute:: {0}.{1}\n".format(module, member)
                        ]

            moduleAttributes and rstFile.content.append(
                "Module Attributes\n-----------------\n\n")
            for moduleAttribute in moduleAttributes.itervalues():
                rstFile.content.extend(moduleAttribute)
                rstFile.content.append("\n")

            functions and rstFile.content.append("Functions\n---------\n\n")
            for function in functions.itervalues():
                rstFile.content.extend(function)
                rstFile.content.append("\n")

            classes and rstFile.content.append("Classes\n-------\n\n")
            for class_ in classes.itervalues():
                rstFile.content.extend(class_)
                rstFile.content.append("\n")

            rstFile.write()
            modules.append(module)

        packagesModules["apiModules"].extend(
            [module for module in modules if not "tests" in module])
        packagesModules["testsModules"].extend(
            [module for module in modules if "tests" in module])

    apiFile = File(apiFile)
    apiFile.content.extend(TOCTREE_TEMPLATE_BEGIN)
    for module in packagesModules["apiModules"]:
        apiFile.content.append("   {0} <{1}>\n".format(
            module, "api/{0}".format(module)))
    for module in packagesModules["testsModules"]:
        apiFile.content.append("   {0} <{1}>\n".format(
            module, "api/{0}".format(module)))
    apiFile.content.extend(TOCTREE_TEMPLATE_END)
    apiFile.write()
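
For reference, a per-module page assembled by the code above would look roughly like the sketch below; the module name "mypackage.utils" and the members "parse" and "Parser" are hypothetical placeholders rather than names taken from the original sources.

# Illustrative sketch only: hypothetical module "mypackage.utils" with one
# public function and one class, mirroring the header and sections built above.
EXPECTED_MODULE_PAGE = ("_`mypackage.utils`\n"
                        "==================\n"
                        "\n"
                        ".. automodule:: mypackage.utils\n"
                        "\n"
                        "Functions\n"
                        "---------\n"
                        "\n"
                        ".. autofunction:: parse\n"
                        "\n"
                        "Classes\n"
                        "-------\n"
                        "\n"
                        ".. autoclass:: Parser\n"
                        "\t:show-inheritance:\n"
                        "\t:members:\n"
                        "\n")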
Ejemplo n.º 41
0
def buildApi(packages, input, output, sanitizer, excludedModules=None):
    """
	Builds the Sphinx documentation API.

	:param packages: Packages to include in the API.
	:type packages: list
	:param input: Input modules directory.
	:type input: unicode
	:param output: Output reStructuredText files directory.
	:type output: unicode
	:param sanitizer: Sanitizer python module.
	:type sanitizer: unicode
	:param excludedModules: Excluded modules.
	:type excludedModules: list
	:return: Definition success.
	:rtype: bool
	"""

    LOGGER.info("{0} | Building Sphinx documentation API!".format(
        buildApi.__name__))

    sanitizer = importSanitizer(sanitizer)

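    # Start from a clean input tree: remove any previous copy before recreating it.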
    if os.path.exists(input):
        shutil.rmtree(input)
    os.makedirs(input)

    excludedModules = [] if excludedModules is None else excludedModules

    packagesModules = {"apiModules": [], "testsModules": []}
    for package in packages:
        package = __import__(package)
        path = foundations.common.getFirstItem(package.__path__)
        packageDirectory = os.path.dirname(path)

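        # Mirror the package "*.ui" files into the input directory, preserving the directory layout.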
        for file in sorted(
                list(
                    foundations.walkers.filesWalker(
                        packageDirectory,
                        filtersIn=("{0}.*\.ui$".format(path), )))):
            LOGGER.info("{0} | Ui file: '{1}'".format(buildApi.__name__, file))
            targetDirectory = os.path.dirname(file).replace(
                packageDirectory, "")
            directory = "{0}{1}".format(input, targetDirectory)
            if not foundations.common.pathExists(directory):
                os.makedirs(directory)
            source = os.path.join(directory, os.path.basename(file))
            shutil.copyfile(file, source)

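        # Copy every Python module into the input directory, sanitize it and build its API page.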
        modules = []
        for file in sorted(
                list(
                    foundations.walkers.filesWalker(
                        packageDirectory,
                        filtersIn=("{0}.*\.py$".format(path), ),
                        filtersOut=excludedModules))):
            LOGGER.info("{0} | Python file: '{1}'".format(
                buildApi.__name__, file))
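            # Derive the dotted module name from the file path relative to the package directory,
            # for instance "<packageDirectory>/foundations/io.py" becomes "foundations.io" (illustrative).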
            module = "{0}.{1}".format(
                (".".join(
                    os.path.dirname(file).replace(packageDirectory,
                                                  "").split("/"))),
                foundations.strings.getSplitextBasename(file)).strip(".")
            LOGGER.info("{0} | Module name: '{1}'".format(
                buildApi.__name__, module))
            directory = os.path.dirname(
                os.path.join(input, module.replace(".", "/")))
            if not foundations.common.pathExists(directory):
                os.makedirs(directory)
            source = os.path.join(directory, os.path.basename(file))
            shutil.copyfile(file, source)

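            # Sanitize the copied module with the user provided sanitizer before documenting it.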
            sanitizer.bleach(source)

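            # Package initializers are copied and sanitized but do not get a dedicated API page.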
            if "__init__.py" in file:
                continue

            rstFilePath = "{0}{1}".format(module, FILES_EXTENSION)
            LOGGER.info("{0} | Building API file: '{1}'".format(
                buildApi.__name__, rstFilePath))
            rstFile = File(os.path.join(output, rstFilePath))
            header = [
                "_`{0}`\n".format(module),
                "==={0}\n".format("=" * len(module)), "\n",
                ".. automodule:: {0}\n".format(module), "\n"
            ]
            rstFile.content.extend(header)

            functions = OrderedDict()
            classes = OrderedDict()
            moduleAttributes = OrderedDict()
            for member, object in moduleBrowser._readmodule(
                    module, [
                        source,
                    ]).iteritems():
                if object.__class__ == moduleBrowser.Function:
                    if not member.startswith("_"):
                        functions[member] = [
                            ".. autofunction:: {0}\n".format(member)
                        ]
                elif object.__class__ == moduleBrowser.Class:
                    classes[member] = [
                        ".. autoclass:: {0}\n".format(member),
                        "	:show-inheritance:\n", "	:members:\n"
                    ]
                elif object.__class__ == moduleBrowser.Global:
                    if not member.startswith("_"):
                        moduleAttributes[member] = [
                            ".. attribute:: {0}.{1}\n".format(module, member)
                        ]

            moduleAttributes and rstFile.content.append(
                "Module Attributes\n-----------------\n\n")
            for moduleAttribute in moduleAttributes.itervalues():
                rstFile.content.extend(moduleAttribute)
                rstFile.content.append("\n")

            functions and rstFile.content.append("Functions\n---------\n\n")
            for function in functions.itervalues():
                rstFile.content.extend(function)
                rstFile.content.append("\n")

            classes and rstFile.content.append("Classes\n-------\n\n")
            for class_ in classes.itervalues():
                rstFile.content.extend(class_)
                rstFile.content.append("\n")

            rstFile.write()
            modules.append(module)

        packagesModules["apiModules"].extend(
            [module for module in modules if not "tests" in module])
        packagesModules["testsModules"].extend(
            [module for module in modules if "tests" in module])

    apiFile = File("{0}{1}".format(output, FILES_EXTENSION))
    apiFile.content.extend(TOCTREE_TEMPLATE_BEGIN)
    for module in packagesModules["apiModules"]:
        apiFile.content.append("   {0} <{1}>\n".format(
            module, "api/{0}".format(module)))
    for module in packagesModules["testsModules"]:
        apiFile.content.append("   {0} <{1}>\n".format(
            module, "api/{0}".format(module)))
    apiFile.content.extend(TOCTREE_TEMPLATE_END)
    apiFile.write()

    return True
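
A minimal usage sketch follows; the package name, directory paths and exclusion pattern are hypothetical placeholders and would have to match an actual project layout.

# Minimal usage sketch only: "mypackage", the directories and the sanitizer
# path below are hypothetical placeholders, not values from the original project.
if __name__ == "__main__":
    buildApi(packages=["mypackage"],
             input="docs/sphinx/source/resources/packages",
             output="docs/sphinx/source/resources/pages/api",
             sanitizer="utilities/sanitizer.py",
             excludedModules=[r"\w*tests\w*"])

Note that the toctree index is written to "<output>.rst" while the per-module pages land inside "<output>", and each toctree entry is hard-coded as "api/<module>", so the output directory is expected to be named "api" and to sit next to the generated index.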