Example #1
File: loader.py  Project: eean/webrok
def getTree(fileDb, fileId, options):
    if not fileDb[fileId].has_key("tree"):
        if options.verbose:
            print "    - Generating tree for %s..." % fileId

        useCache = False
        loadCache = False

        fileEntry = fileDb[fileId]
        filePath = fileEntry["path"]

        if options.cacheDirectory != None:
            cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + "-tree.pcl")
            useCache = True

            if not filetool.checkCache(filePath, cachePath):
                loadCache = True

        if loadCache:
            tree = filetool.readCache(cachePath)
        else:
            tree = treegenerator.createSyntaxTree(getTokens(fileDb, fileId, options))

            if useCache:
                if options.verbose:
                    print "    - Caching tree for %s..." % fileId

                filetool.storeCache(cachePath, tree)

        fileDb[fileId]["tree"] = tree

    return fileDb[fileId]["tree"]
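Note: all of the getters in these examples share the same caching guard. The call sites suggest that filetool.checkCache returns a truthy value when the cache file is out of date (so "not checkCache(...)" means the cached result may be loaded), that readCache/storeCache read and write the ".pcl" files, and that the extra getInternalModTime(options) argument seen in some of the later examples folds the generator's own modification time into the staleness check. The helpers below are a minimal hypothetical sketch of that behaviour for orientation only; they are not filetool's actual implementation.

import os
import pickle

def cache_is_outdated(file_path, cache_path, internal_mod_time=0):
    # Mirrors how the examples use filetool.checkCache: truthy means the
    # cache must be regenerated, so "not outdated" means it is safe to load.
    if not os.path.exists(cache_path):
        return True
    cache_time = os.path.getmtime(cache_path)
    return os.path.getmtime(file_path) > cache_time or internal_mod_time > cache_time

def read_cache(cache_path):
    # The ".pcl" suffix suggests pickled Python objects.
    with open(cache_path, "rb") as handle:
        return pickle.load(handle)

def store_cache(cache_path, data):
    with open(cache_path, "wb") as handle:
        pickle.dump(data, handle)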
Example #2
def getStrings(fileDb, fileId, options):
  if not fileDb[fileId].has_key("strings"):
    if options.verbose:
      print "    - Searching for strings in %s..." % fileId

    useCache = False
    loadCache = False

    fileEntry = fileDb[fileId]
    filePath = fileEntry["path"]

    if options.cacheDirectory != None:
      cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + "-strings.pcl")
      useCache = True

      if not filetool.checkCache(filePath, cachePath, getInternalModTime(options)):
        loadCache = True

    if loadCache:
      strings = filetool.readCache(cachePath)
    else:
      strings = stringoptimizer.search(getTree(fileDb, fileId, options), options.verbose)

      if useCache:
        if options.verbose:
          print "    - Caching strings for %s..." % fileId

        filetool.storeCache(cachePath, strings)

    fileDb[fileId]["strings"] = strings

  return fileDb[fileId]["strings"]
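Note: unlike the getTree example above, this variant passes getInternalModTime(options) as a third argument to filetool.checkCache. The name suggests it reports when the generator's own code last changed, so cached results are also rebuilt after a tool upgrade rather than only when the class file changes. A hypothetical illustration (the function name, signature, and script list below are placeholders, not the real implementation):

import os

def get_internal_mod_time(generator_scripts):
    # Hypothetical stand-in: the newest modification time among the
    # generator's own source files, e.g.
    # get_internal_mod_time(["loader.py", "filetool.py", "tokenizer.py"])
    return max(os.path.getmtime(path) for path in generator_scripts)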
Example #3
def getTokens(fileDb, fileId, options):
  if not fileDb[fileId].has_key("tokens"):
    if options.verbose:
      print "    - Generating tokens for %s..." % fileId

    useCache = False
    loadCache = False

    fileEntry = fileDb[fileId]

    filePath = fileEntry["path"]
    fileEncoding = fileEntry["encoding"]

    if options.cacheDirectory != None:
      cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + "-tokens.pcl")
      useCache = True

      if not filetool.checkCache(filePath, cachePath, getInternalModTime(options)):
        loadCache = True

    if loadCache:
      tokens = filetool.readCache(cachePath)
    else:
      fileContent = filetool.read(filePath, fileEncoding)
      tokens = tokenizer.parseStream(fileContent, fileId)

      if useCache:
        if options.verbose:
          print "    - Caching tokens for %s..." % fileId

        filetool.storeCache(cachePath, tokens)

    fileDb[fileId]["tokens"] = tokens

  return fileDb[fileId]["tokens"]
Example #4
File: loader.py  Project: eean/webrok
def getTokens(fileDb, fileId, options):
    if not fileDb[fileId].has_key("tokens"):
        if options.verbose:
            print "    - Generating tokens for %s..." % fileId

        useCache = False
        loadCache = False

        fileEntry = fileDb[fileId]

        filePath = fileEntry["path"]
        fileEncoding = fileEntry["encoding"]

        if options.cacheDirectory != None:
            cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + "-tokens.pcl")
            useCache = True

            if not filetool.checkCache(filePath, cachePath):
                loadCache = True

        if loadCache:
            tokens = filetool.readCache(cachePath)
        else:
            fileContent = filetool.read(filePath, fileEncoding)

            # TODO: This hack is necessary because the current parser cannot handle comments
            #       without a context.
            if fileDb[fileId]["meta"]:
                fileContent += "\n(function() {})()"

            tokens = tokenizer.parseStream(fileContent, fileId)

            if useCache:
                if options.verbose:
                    print "    - Caching tokens for %s..." % fileId

                filetool.storeCache(cachePath, tokens)

        fileDb[fileId]["tokens"] = tokens

    return fileDb[fileId]["tokens"]
Example #5
def getTokens(fileDb, fileId, options):
    if not fileDb[fileId].has_key("tokens"):
        if options.verbose:
            print "    - Generating tokens for %s..." % fileId

        useCache = False
        loadCache = False

        fileEntry = fileDb[fileId]

        filePath = fileEntry["path"]
        fileEncoding = fileEntry["encoding"]

        if options.cacheDirectory != None:
            cachePath = os.path.join(
                filetool.normalize(options.cacheDirectory),
                fileId + "-tokens.pcl")
            useCache = True

            if not filetool.checkCache(filePath, cachePath,
                                       getInternalModTime(options)):
                loadCache = True

        if loadCache:
            tokens = filetool.readCache(cachePath)
        else:
            fileContent = filetool.read(filePath, fileEncoding)
            tokens = tokenizer.parseStream(fileContent, fileId)

            if useCache:
                if options.verbose:
                    print "    - Caching tokens for %s..." % fileId

                filetool.storeCache(cachePath, tokens)

        fileDb[fileId]["tokens"] = tokens

    return fileDb[fileId]["tokens"]
Example #6
def getStrings(fileDb, fileId, options):
    if not fileDb[fileId].has_key("strings"):
        if options.verbose:
            print "    - Searching for strings in %s..." % fileId

        useCache = False
        loadCache = False

        fileEntry = fileDb[fileId]
        filePath = fileEntry["path"]

        if options.cacheDirectory != None:
            cachePath = os.path.join(
                filetool.normalize(options.cacheDirectory),
                fileId + "-strings.pcl")
            useCache = True

            if not filetool.checkCache(filePath, cachePath,
                                       getInternalModTime(options)):
                loadCache = True

        if loadCache:
            strings = filetool.readCache(cachePath)
        else:
            strings = stringoptimizer.search(getTree(fileDb, fileId, options),
                                             options.verbose)

            if useCache:
                if options.verbose:
                    print "    - Caching strings for %s..." % fileId

                filetool.storeCache(cachePath, strings)

        fileDb[fileId]["strings"] = strings

    return fileDb[fileId]["strings"]
Example #7
File: loader.py  Project: eean/webrok
def indexFile(
    filePath,
    filePathId,
    classPath,
    listIndex,
    classEncoding,
    classUri,
    resourceInput,
    resourceOutput,
    options,
    fileDb={},
    moduleDb={},
):

    ########################################
    # Checking cache
    ########################################

    useCache = False
    loadCache = False
    cachePath = None

    if options.cacheDirectory != None:
        cachePath = os.path.join(filetool.normalize(options.cacheDirectory), filePathId + "-entry.pcl")
        useCache = True

        if not filetool.checkCache(filePath, cachePath):
            loadCache = True

    ########################################
    # Loading file content / cache
    ########################################

    if loadCache:
        fileEntry = filetool.readCache(cachePath)
        fileId = filePathId

    else:
        fileContent = filetool.read(filePath, classEncoding)

        # Extract ID
        fileContentId = extractFileContentId(fileContent)

        # Search for valid ID
        if fileContentId == None:
            if not filePathId.endswith("__init__"):
                print "    - Could not extract ID from file: %s. Fallback to path %s!" % (filePath, filePathId)
            fileId = filePathId

        else:
            fileId = fileContentId

        if fileId != filePathId:
            print "    - ID mismatch: CONTENT=%s != PATH=%s" % (fileContentId, filePathId)
            if not options.migrateSource:
                sys.exit(1)

        fileEntry = {
            "autoDependencies": False,
            "cached": False,
            "cachePath": cachePath,
            "meta": fileId.endswith("__init__"),
            "ignoreDeps": extractIgnore(fileContent, fileId),
            "optionalDeps": extractOptional(fileContent, fileId),
            "loadtimeDeps": extractLoadtimeDeps(fileContent, fileId),
            "runtimeDeps": extractRuntimeDeps(fileContent, fileId),
            "resources": extractResources(fileContent, fileId),
            "embeds": extractEmbeds(fileContent, fileId),
            "modules": extractModules(fileContent, fileId),
        }

    ########################################
    # Additional data
    ########################################

    # We don't want to cache these items
    fileEntry["path"] = filePath
    fileEntry["pathId"] = filePathId
    fileEntry["encoding"] = classEncoding
    fileEntry["resourceInput"] = resourceInput
    fileEntry["resourceOutput"] = resourceOutput
    fileEntry["classUri"] = classUri
    fileEntry["listIndex"] = listIndex
    fileEntry["classPath"] = classPath

    ########################################
    # Registering file
    ########################################

    # Register to file database
    fileDb[fileId] = fileEntry

    # Register to module database
    for moduleId in fileEntry["modules"]:
        if moduleDb.has_key(moduleId):
            moduleDb[moduleId].append(fileId)
        else:
            moduleDb[moduleId] = [fileId]
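Note: indexFile declares fileDb={} and moduleDb={} as default arguments. In Python, default values are evaluated once at definition time, so every call that omits these arguments shares and mutates the same two dictionaries. That may well be intended here, since the databases are meant to accumulate across files, but it is a classic pitfall worth flagging. A small standalone demonstration, unrelated to loader.py itself:

def register(key, value, db={}):
    # The default dict is created once, when the function is defined,
    # and is then shared by every call that does not pass its own db.
    db[key] = value
    return db

first = register("a", 1)
second = register("b", 2)
print(first is second)   # True: both calls mutated the same dictionary
print(second)            # {'a': 1, 'b': 2}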
Example #8
def indexFile(filePath, filePathId, classPath, listIndex, classEncoding, classUri, resourceInput, resourceOutput, options, fileDb={}, moduleDb={}):

    ########################################
    # Checking cache
    ########################################

    useCache = False
    loadCache = False
    cachePath = None

    if options.cacheDirectory != None:
        cachePath = os.path.join(filetool.normalize(options.cacheDirectory), filePathId + "-entry.pcl")
        useCache = True

        if not filetool.checkCache(filePath, cachePath):
            loadCache = True



    ########################################
    # Loading file content / cache
    ########################################

    if loadCache:
        fileEntry = filetool.readCache(cachePath)
        fileId = filePathId

    else:
        fileContent = filetool.read(filePath, classEncoding)

        # Extract ID
        fileContentId = extractFileContentId(fileContent)

        # Search for valid ID
        if fileContentId == None:
            if not filePathId.endswith("__init__"):
                print "    - Could not extract ID from file: %s. Fallback to path %s!" % (filePath, filePathId)
            fileId = filePathId

        else:
            fileId = fileContentId

        if fileId != filePathId:
            print "    - ID mismatch: CONTENT=%s != PATH=%s" % (fileContentId, filePathId)
            if not options.migrateSource:
                sys.exit(1)

        fileEntry = {
            "autoDependencies" : False,
            "cached" : False,
            "cachePath" : cachePath,
            "meta" : fileId.endswith("__init__"),
            "ignoreDeps" : extractIgnore(fileContent, fileId),
            "optionalDeps" : extractOptional(fileContent, fileId),
            "loadtimeDeps" : extractLoadtimeDeps(fileContent, fileId),
            "runtimeDeps" : extractRuntimeDeps(fileContent, fileId),
            "resources" : extractResources(fileContent, fileId),
            "embeds" : extractEmbeds(fileContent, fileId),
            "modules" : extractModules(fileContent, fileId)
        }



    ########################################
    # Additional data
    ########################################

    # We don't want to cache these items
    fileEntry["path"] = filePath
    fileEntry["pathId"] = filePathId
    fileEntry["encoding"] = classEncoding
    fileEntry["resourceInput"] = resourceInput
    fileEntry["resourceOutput"] = resourceOutput
    fileEntry["classUri"] = classUri
    fileEntry["listIndex"] = listIndex
    fileEntry["classPath"] = classPath


    ########################################
    # Registering file
    ########################################

    # Register to file database
    fileDb[fileId] = fileEntry

    # Register to module database
    for moduleId in fileEntry["modules"]:
        if moduleDb.has_key(moduleId):
            moduleDb[moduleId].append(fileId)
        else:
            moduleDb[moduleId] = [fileId]
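Note: for orientation, a hedged sketch of how a caller might wire these pieces together. Only the function signatures come from the examples above; every path, ID, and the Options class below is a placeholder invented for illustration, and the sketch assumes the functions live in an importable loader module alongside filetool, tokenizer, treegenerator, and stringoptimizer.

import loader

class Options(object):
    # Only the attributes the examples actually read.
    verbose = True
    cacheDirectory = "/tmp/qx-cache"   # placeholder; None disables caching
    migrateSource = False

options = Options()
fileDb = {}
moduleDb = {}

# Index one class file; every positional value here is a placeholder.
loader.indexFile(
    "source/class/demo/Application.js",  # filePath
    "demo.Application",                  # filePathId
    "source/class",                      # classPath
    0,                                   # listIndex
    "utf-8",                             # classEncoding
    "../source/class",                   # classUri
    "source/resource",                   # resourceInput
    "build/resource",                    # resourceOutput
    options,
    fileDb,
    moduleDb,
)

# Each getter fills its slot in fileDb lazily, reusing the on-disk cache
# when options.cacheDirectory is set.
tokens = loader.getTokens(fileDb, "demo.Application", options)
tree = loader.getTree(fileDb, "demo.Application", options)
strings = loader.getStrings(fileDb, "demo.Application", options)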