def getTree(fileDb, fileId, options):
    if not fileDb[fileId].has_key("tree"):
        if options.verbose:
            print " - Generating tree for %s..." % fileId

        useCache = False
        loadCache = False

        fileEntry = fileDb[fileId]
        filePath = fileEntry["path"]

        if options.cacheDirectory != None:
            cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + "-tree.pcl")
            useCache = True

            # A falsy checkCache() result means the cached tree is still valid
            if not filetool.checkCache(filePath, cachePath):
                loadCache = True

        if loadCache:
            tree = filetool.readCache(cachePath)
        else:
            tree = treegenerator.createSyntaxTree(getTokens(fileDb, fileId, options))

            if useCache:
                if options.verbose:
                    print " - Caching tree for %s..." % fileId

                filetool.storeCache(cachePath, tree)

        fileDb[fileId]["tree"] = tree

    return fileDb[fileId]["tree"]
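# Usage sketch for getTree(). The fileDb entries and the options object are
# normally built by the generator's scan phase; the class id and path below
# are hypothetical and only illustrate the memoize-then-pickle flow.
#
#     import optparse
#     opts = optparse.Values({"verbose": True, "cacheDirectory": "/tmp/qxcache"})
#     fileDb = {"custom.Application": {"path": "source/class/custom/Application.js",
#                                      "encoding": "utf-8"}}
#     tree = getTree(fileDb, "custom.Application", opts)  # tokenizes, parses, writes *-tree.pcl
#     tree = getTree(fileDb, "custom.Application", opts)  # served from fileDb["custom.Application"]["tree"]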
def storeEntryCache(fileDb, options):
    cacheCounter = 0
    ignoreDbEntries = ["tokens", "tree", "path", "pathId", "encoding", "resourceInput", "resourceOutput", "listIndex", "classPath", "classUri"]

    for fileId in fileDb:
        fileEntry = fileDb[fileId]

        # Skip entries whose cache is already up to date
        if fileEntry["cached"] == True:
            continue

        # Store flag
        fileEntry["cached"] = True

        # Copy entries, leaving out the regenerable and machine-local ones
        fileEntryCopy = {}
        for key in fileEntry:
            if not key in ignoreDbEntries:
                fileEntryCopy[key] = fileEntry[key]

        filetool.storeCache(fileEntry["cachePath"], fileEntryCopy)
        cacheCounter += 1

    if cacheCounter == 0:
        print " * No classes were modified"
    else:
        print " * %s classes were modified" % cacheCounter
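# Sketch of what storeEntryCache() persists for one class. Every key except
# those in ignoreDbEntries survives into the pickled copy, so the bulky,
# regenerable artifacts (tokens, tree) and machine-local paths are stripped.
# Field names other than "cached" and "cachePath" are hypothetical.
#
#     fileEntry = {"path": "...", "tokens": [...], "tree": ...,
#                  "cached": False,
#                  "cachePath": "/tmp/qxcache/custom.Application-entry.pcl",
#                  "loadtimeDeps": [...]}
#     # pickled copy: {"cached": True, "cachePath": "...", "loadtimeDeps": [...]}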
def getStrings(fileDb, fileId, options):
    if not fileDb[fileId].has_key("strings"):
        if options.verbose:
            print " - Searching for strings in %s..." % fileId

        useCache = False
        loadCache = False

        fileEntry = fileDb[fileId]
        filePath = fileEntry["path"]

        if options.cacheDirectory != None:
            cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + "-strings.pcl")
            useCache = True

            if not filetool.checkCache(filePath, cachePath, getInternalModTime(options)):
                loadCache = True

        if loadCache:
            strings = filetool.readCache(cachePath)
        else:
            strings = stringoptimizer.search(getTree(fileDb, fileId, options), options.verbose)

            if useCache:
                if options.verbose:
                    print " - Caching strings for %s..." % fileId

                filetool.storeCache(cachePath, strings)

        fileDb[fileId]["strings"] = strings

    return fileDb[fileId]["strings"]
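# A sketch of the cache-validity test assumed by the call above: checkCache()
# is treated as returning a truthy "reason to regenerate" when the cache file
# is missing or older than either the source file or the generator itself
# (the getInternalModTime(options) argument), and a falsy value when the
# cache may be reused. This is an assumption drawn from the call sites, not
# the real filetool implementation.
def checkCacheSketch(filePath, cachePath, internalModTime=0):
    if not os.path.exists(cachePath):
        return "Cache file is missing"

    cacheModTime = os.stat(cachePath).st_mtime

    # Stale when the source file changed after the cache was written
    if os.stat(filePath).st_mtime > cacheModTime:
        return "Source file is newer than cache"

    # Stale when the generator scripts changed after the cache was written
    if internalModTime > cacheModTime:
        return "Generator is newer than cache"

    return False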
def getTokens(fileDb, fileId, options):
    if not fileDb[fileId].has_key("tokens"):
        if options.verbose:
            print " - Generating tokens for %s..." % fileId

        useCache = False
        loadCache = False

        fileEntry = fileDb[fileId]
        filePath = fileEntry["path"]
        fileEncoding = fileEntry["encoding"]

        if options.cacheDirectory != None:
            cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + "-tokens.pcl")
            useCache = True

            if not filetool.checkCache(filePath, cachePath, getInternalModTime(options)):
                loadCache = True

        if loadCache:
            tokens = filetool.readCache(cachePath)
        else:
            fileContent = filetool.read(filePath, fileEncoding)
            tokens = tokenizer.parseStream(fileContent, fileId)

            if useCache:
                if options.verbose:
                    print " - Caching tokens for %s..." % fileId

                filetool.storeCache(cachePath, tokens)

        fileDb[fileId]["tokens"] = tokens

    return fileDb[fileId]["tokens"]
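# getTree(), getStrings() and getTokens() all follow the same
# memoize-then-pickle pattern; a distilled sketch (the helper name and the
# "compute" callback are hypothetical, the filetool calls are the ones used
# above):
def getCachedArtifact(fileDb, fileId, key, suffix, compute, options):
    fileEntry = fileDb[fileId]

    if not fileEntry.has_key(key):
        cachePath = None
        if options.cacheDirectory != None:
            cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + suffix)

        if cachePath != None and not filetool.checkCache(fileEntry["path"], cachePath):
            # Disk cache still valid: unpickle instead of recomputing
            fileEntry[key] = filetool.readCache(cachePath)
        else:
            fileEntry[key] = compute(fileEntry)

            if cachePath != None:
                filetool.storeCache(cachePath, fileEntry[key])

    return fileEntry[key]

# e.g. getTokens(fileDb, fileId, options) could then be expressed as:
#     getCachedArtifact(fileDb, fileId, "tokens", "-tokens.pcl",
#         lambda entry: tokenizer.parseStream(filetool.read(entry["path"], entry["encoding"]), fileId),
#         options)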
def getTokens(fileDb, fileId, options):
    if not fileDb[fileId].has_key("tokens"):
        if options.verbose:
            print " - Generating tokens for %s..." % fileId

        useCache = False
        loadCache = False

        fileEntry = fileDb[fileId]
        filePath = fileEntry["path"]
        fileEncoding = fileEntry["encoding"]

        if options.cacheDirectory != None:
            cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + "-tokens.pcl")
            useCache = True

            if not filetool.checkCache(filePath, cachePath):
                loadCache = True

        if loadCache:
            tokens = filetool.readCache(cachePath)
        else:
            fileContent = filetool.read(filePath, fileEncoding)

            # TODO: This hack is necessary because the current parser cannot
            # handle comments without a context.
            if fileDb[fileId]["meta"]:
                fileContent += "\n(function() {})()"

            tokens = tokenizer.parseStream(fileContent, fileId)

            if useCache:
                if options.verbose:
                    print " - Caching tokens for %s..." % fileId

                filetool.storeCache(cachePath, tokens)

        fileDb[fileId]["tokens"] = tokens

    return fileDb[fileId]["tokens"]
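# Why the hack above helps (illustrative input, hypothetical file content):
# a meta file may consist solely of documentation comments, leaving the
# parser a trailing comment with no statement to attach it to. Appending a
# no-op call gives every comment a following context:
#
#     fileContent = "/** #require(custom.Application) */"
#     fileContent += "\n(function() {})()"  # the comment now precedes a statement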