def getTree(fileDb, fileId, options):
    if not fileDb[fileId].has_key("tree"):
        if options.verbose:
            print " - Generating tree for %s..." % fileId

        useCache = False
        loadCache = False

        fileEntry = fileDb[fileId]
        filePath = fileEntry["path"]

        if options.cacheDirectory != None:
            cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + "-tree.pcl")
            useCache = True

            if not filetool.checkCache(filePath, cachePath):
                loadCache = True

        if loadCache:
            tree = filetool.readCache(cachePath)
        else:
            tree = treegenerator.createSyntaxTree(getTokens(fileDb, fileId, options))

            if useCache:
                if options.verbose:
                    print " - Caching tree for %s..." % fileId

                filetool.storeCache(cachePath, tree)

        fileDb[fileId]["tree"] = tree

    return fileDb[fileId]["tree"]
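
# Hypothetical helper (a sketch, not part of the original module): one way a
# caller could pre-generate syntax trees for every indexed file, e.g. to warm
# up the cache directory in a single pass. Assumes fileDb was populated by
# indexFile()/indexClassPath() below and that `options` provides the same
# attributes getTree()/getTokens() read (verbose, cacheDirectory, ...).
def generateAllTrees(fileDb, options):
    for fileId in fileDb.keys():
        getTree(fileDb, fileId, options)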
def indexClassPath(classPath, listIndex, options, fileDb={}, moduleDb={}):
    classPath = filetool.normalize(classPath)
    counter = 0

    # Search for other indexed lists
    if len(options.classEncoding) > listIndex:
        classEncoding = options.classEncoding[listIndex]
    else:
        classEncoding = "utf-8"

    if len(options.classUri) > listIndex:
        classUri = options.classUri[listIndex]
    else:
        classUri = None

    if len(options.resourceInput) > listIndex:
        resourceInput = options.resourceInput[listIndex]
    else:
        resourceInput = None

    if len(options.resourceOutput) > listIndex:
        resourceOutput = options.resourceOutput[listIndex]
    else:
        resourceOutput = None

    for root, dirs, files in os.walk(classPath):
        # Filter ignored directories
        for ignoredDir in DIRIGNORE:
            if ignoredDir in dirs:
                dirs.remove(ignoredDir)

        # Searching for files
        for fileName in files:
            if os.path.splitext(fileName)[1] == JSEXT and not fileName.startswith("."):
                filePath = os.path.join(root, fileName)
                filePathId = filePath.replace(classPath + os.sep, "").replace(JSEXT, "").replace(os.sep, ".")

                indexFile(filePath, filePathId, classPath, listIndex, classEncoding, classUri, resourceInput, resourceOutput, options, fileDb, moduleDb)
                counter += 1

    return counter
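
# Hypothetical usage sketch (not part of the original script): indexing several
# class paths into shared databases. The attribute `options.classPaths` is an
# assumption here (a list of class directories); the per-list settings
# (classEncoding, classUri, resourceInput, resourceOutput) are looked up by
# indexClassPath() itself via listIndex.
def indexAllClassPaths(options):
    fileDb = {}
    moduleDb = {}

    for listIndex, classPath in enumerate(options.classPaths):
        counter = indexClassPath(classPath, listIndex, options, fileDb, moduleDb)

        if options.verbose:
            print " - Indexed %s files from %s" % (counter, classPath)

    return fileDb, moduleDb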
def getTokens(fileDb, fileId, options):
    if not fileDb[fileId].has_key("tokens"):
        if options.verbose:
            print " - Generating tokens for %s..." % fileId

        useCache = False
        loadCache = False

        fileEntry = fileDb[fileId]
        filePath = fileEntry["path"]
        fileEncoding = fileEntry["encoding"]

        if options.cacheDirectory != None:
            cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + "-tokens.pcl")
            useCache = True

            if not filetool.checkCache(filePath, cachePath):
                loadCache = True

        if loadCache:
            tokens = filetool.readCache(cachePath)
        else:
            fileContent = filetool.read(filePath, fileEncoding)

            # TODO: This hack is necessary because the current parser cannot
            # handle comments without a context.
            if fileDb[fileId]["meta"]:
                fileContent += "\n(function() {})()"

            tokens = tokenizer.parseStream(fileContent, fileId)

            if useCache:
                if options.verbose:
                    print " - Caching tokens for %s..." % fileId

                filetool.storeCache(cachePath, tokens)

        fileDb[fileId]["tokens"] = tokens

    return fileDb[fileId]["tokens"]
def indexFile(filePath, filePathId, classPath, listIndex, classEncoding, classUri, resourceInput, resourceOutput, options, fileDb={}, moduleDb={}):

    ########################################
    # Checking cache
    ########################################

    useCache = False
    loadCache = False
    cachePath = None

    if options.cacheDirectory != None:
        cachePath = os.path.join(filetool.normalize(options.cacheDirectory), filePathId + "-entry.pcl")
        useCache = True

        if not filetool.checkCache(filePath, cachePath):
            loadCache = True

    ########################################
    # Loading file content / cache
    ########################################

    if loadCache:
        fileEntry = filetool.readCache(cachePath)
        fileId = filePathId
    else:
        fileContent = filetool.read(filePath, classEncoding)

        # Extract ID
        fileContentId = extractFileContentId(fileContent)

        # Search for valid ID
        if fileContentId == None:
            if not filePathId.endswith("__init__"):
                print " - Could not extract ID from file: %s. Fallback to path %s!" % (filePath, filePathId)

            fileId = filePathId
        else:
            fileId = fileContentId

            if fileId != filePathId:
                print " - ID mismatch: CONTENT=%s != PATH=%s" % (fileContentId, filePathId)

                if not options.migrateSource:
                    sys.exit(1)

        fileEntry = {
            "autoDependencies" : False,
            "cached" : False,
            "cachePath" : cachePath,
            "meta" : fileId.endswith("__init__"),
            "ignoreDeps" : extractIgnore(fileContent, fileId),
            "optionalDeps" : extractOptional(fileContent, fileId),
            "loadtimeDeps" : extractLoadtimeDeps(fileContent, fileId),
            "runtimeDeps" : extractRuntimeDeps(fileContent, fileId),
            "resources" : extractResources(fileContent, fileId),
            "embeds" : extractEmbeds(fileContent, fileId),
            "modules" : extractModules(fileContent, fileId)
        }

    ########################################
    # Additional data
    ########################################

    # We don't want to cache these items
    fileEntry["path"] = filePath
    fileEntry["pathId"] = filePathId
    fileEntry["encoding"] = classEncoding
    fileEntry["resourceInput"] = resourceInput
    fileEntry["resourceOutput"] = resourceOutput
    fileEntry["classUri"] = classUri
    fileEntry["listIndex"] = listIndex
    fileEntry["classPath"] = classPath

    ########################################
    # Registering file
    ########################################

    # Register to file database
    fileDb[fileId] = fileEntry

    # Register to module database
    for moduleId in fileEntry["modules"]:
        if moduleId in moduleDb:
            moduleDb[moduleId].append(fileId)
        else:
            moduleDb[moduleId] = [fileId]
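
# Hypothetical sketch (not in the original module): once indexFile() has run,
# moduleDb maps each module ID to the list of file IDs that declared it via
# extractModules(). A caller could dump that mapping like this, using the same
# print style as the rest of this script.
def printModuleDb(moduleDb):
    for moduleId in sorted(moduleDb.keys()):
        print " - Module %s: %s" % (moduleId, ", ".join(moduleDb[moduleId]))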