def searchForDependencies(self, projectDirectory):
    """Search for dependencies in a project.

    Returns dictionary where the keys are the packages/modules in the
    project and the values are packages/modules that the respective key
    imported.

    Arguments:
    projectDirectory -- Absolute path to the root directory of the
                        project to search for dependencies in.

    """
    if not os.path.isdir(projectDirectory):
        raise IOError("'{}' is not a valid directory".format(projectDirectory))
    # Normalise to an absolute path before anything else uses it
    rootPath = os.path.abspath(projectDirectory)

    # Scan every .py file under the root and pull out its imports
    fileProcessor = FileProcessor(
        FileSearcher(True),
        [ExtensionFilterer(["py"])],
        ModuleDependencyExtractor())
    rawDependencies = fileProcessor.process(rootPath)

    # Turn relative imports into absolute module references
    resolvedDependencies = ImportResolver(rootPath).resolveImports(rawDependencies)

    # Keep only modules that actually belong to the scanned project
    return WhitelistApplier().applyToProject(rootPath, resolvedDependencies)
def build(self):
    """Build and return the application's root widget.

    Creates a FileProcessor, registers the Login and Dashboard screens
    on a ScreenManager, triggers file processing, and returns the
    manager as the root widget.
    """
    fileProcessor = FileProcessor()
    screenManager = ScreenManager()
    # Register the application's screens in display order
    for screen in (LoginWindow(name="Login"), DashboardWindow(name="Dashboard")):
        screenManager.add_widget(screen)
    # NOTE(review): processing files during build() blocks the UI thread
    # until it finishes — presumably intentional; confirm if startup is slow
    fileProcessor.processFiles()
    return screenManager
def main(directoriesToSearch, showAll):
    """Run image resource extraction process.

    Arguments:
    directoriesToSearch -- List containing all of the directories
                           containing web page source code that need to
                           be scanned
    showAll -- If set to True, then all scanned files will be displayed,
               even if no image URLs were extracted from them. This
               means if this is False, then any files where no data was
               found are omitted.

    """
    # Build components to use for file processor
    searcher = searchers.FileSearcher(True)
    filterer = filterers.ExtensionFilterer(
        ["html", "htm", "shtml", "php", "css", "js"])
    extractor = ImageURLExtractor()
    processor = FileProcessor(searcher, [filterer], extractor)

    # Perform the URL extraction and display findings
    extractedURLs = processor.process(directoriesToSearch)
    for filename, imageURLs in extractedURLs.items():
        # If nothing was found in this file and the appropriate
        # flag is set, skip this file
        if not imageURLs and not showAll:
            continue
        # Join rather than += in a loop: linear, and no trailing
        # newline to strip afterwards
        imageURLLines = "\n".join("\t{}".format(url) for url in imageURLs)
        print("{}\n{}".format(filename, imageURLLines))
def main(directoriesToSearch):
    """Run checksum generation process.

    Arguments:
    directoriesToSearch -- List containing all of the directories
                           containing files to generate checksums for

    """
    # Assemble the file processor: recursive search, no filtering,
    # checksum generation as the extraction step
    processor = FileProcessor(
        searchers.FileSearcher(True),
        [],
        ChecksumGenerator())

    # Perform checksum generation and display every checksum
    results = processor.process(directoriesToSearch)
    for filename, checksum in results.items():
        print("{}\n\t{}".format(filename, checksum))
def ExportToExcel(self, path, expIds):
    """Start exporting the given experiments to an Excel file at *path*.

    Spawns a FileProcessor worker for the export and shows a modal,
    abortable progress dialog. Does nothing if a worker is already
    running (``self.worker`` is truthy).

    Arguments:
    path -- Destination path for the exported Excel file
    expIds -- Identifiers of the experiments to export
    """
    # Trigger the worker thread unless it's already busy
    if not self.worker:
        self.worker = FileProcessor(self, path, expIds)
        # Renamed from `max`, which shadowed the builtin
        maxProgress = 100
        self.progress = wx.ProgressDialog(
            "Export in Progress",
            "Preparing ...",
            maximum=maxProgress,
            parent=self,
            style=0
            | wx.PD_APP_MODAL
            | wx.PD_CAN_ABORT
            #| wx.PD_CAN_SKIP
            #| wx.PD_ELAPSED_TIME
            | wx.PD_ESTIMATED_TIME
            | wx.PD_REMAINING_TIME
            #| wx.PD_AUTO_HIDE
            )