コード例 #1
0
ファイル: MClassCode.py プロジェクト: lite/empweb
    def getCode(self, compOptions, treegen=treegenerator, featuremap={}):
        """Return this class's JS code, either the raw source or a compiled,
        optimized version, depending on *compOptions*.

        :param compOptions: compile options (.optimize, .variantset, .format)
        :param treegen: tree generator module (unused on the source path)
        :param featuremap: feature map forwarded to self.optimize
            (NOTE(review): mutable default is shared across calls; kept
            unchanged for interface compatibility -- callers should pass
            their own dict)
        :return: string with the class code
        """
        # source versions
        if not compOptions.optimize:
            compiled = filetool.read(self.path)
            if compOptions.format and compiled[
                    -1:] != "\n":  # assure trailing \n
                compiled += '\n'
        # compiled versions
        else:
            optimize = compOptions.optimize
            variants = compOptions.variantset
            format_ = compOptions.format
            classVariants = self.classVariants()
            # relevantVariants is the intersection between the variant set of this job
            # and the variant keys actually used in the class
            relevantVariants = self.projectClassVariantsToCurrent(
                classVariants, variants)
            variantsId = util.toString(relevantVariants)
            optimizeId = self._optimizeId(optimize)
            cache = self.context["cache"]

            cacheId = "compiled-%s-%s-%s-%s" % (self.path, variantsId,
                                                optimizeId, format_)
            compiled, _ = cache.read(cacheId, self.path)

            if compiled is None:  # idiom fix: identity comparison with None
                tree = self.optimize(None, optimize, variants, featuremap)
                compiled = self.serializeTree(tree, optimize, format_)
                # skip caching when the "statics" optimization is active
                if "statics" not in optimize:
                    cache.write(cacheId, compiled)

        return compiled
コード例 #2
0
    def getMeta(self, fileId):
        """Return the dependency meta data for *fileId*, using the cache
        when it still validates against the class file.

        :param fileId: class id, key into self._classes
        :return: dict with loadtime/runtime/optional/ignore/asset deps
        """
        fileEntry = self._classes[fileId]
        filePath = fileEntry["path"]
        cacheId = "meta-%s" % fileId

        # try the cache first; readmulti is checked against filePath
        meta = self._cache.readmulti(cacheId, filePath)
        if meta is not None:  # idiom fix: identity comparison with None
            return meta

        meta = {}

        self._console.indent()

        content = filetool.read(filePath, fileEntry["encoding"])

        # extract each dependency category from the raw file content
        meta["loadtimeDeps"] = self._extractLoadtimeDeps(content, fileId)
        meta["runtimeDeps"] = self._extractRuntimeDeps(content, fileId)
        meta["optionalDeps"] = self._extractOptionalDeps(content)
        meta["ignoreDeps"] = self._extractIgnoreDeps(content)
        meta["assetDeps"] = self._extractAssetDeps(content)

        self._console.outdent()

        self._cache.writemulti(cacheId, meta)
        return meta
コード例 #3
0
ファイル: iconpackager.py プロジェクト: dominikg/qooxdoo
def getData():
    """Parse data/icon/qooxdoo.dat into a sorted list of
    [name, alternative, ...] rows.

    Exits the process (via sys.exit) when a duplicate icon name is found.

    :return: list of lists, each starting with the icon name followed by
        its alternatives
    """
    data = os.path.join(filetool.root(), os.pardir, "data", "icon", "qooxdoo.dat")
    lines = filetool.read(data).split("\n")
    result = {}

    for line in lines:
        # skip blank lines, indented continuation lines and comments
        if line == "" or line.startswith(" ") or line.startswith("#"):
            continue

        if ":" in line:
            # "name:alt1,alt2" -- split at the first colon
            key, _, alternatives = line.partition(":")
            alternative = alternatives.split(",")
        else:
            alternative = []
            key = line

        if key in result:
            console.error("Duplicate key found: %s" % key)
            sys.exit(1)

        result[key] = alternative

    # convert to a sorted array of [key, *alternatives] rows
    # (sorted() replaces keys=result.keys(); keys.sort(), which fails on Py3)
    arr = []
    for key in sorted(result):
        arr.append([key] + result[key])

    return arr
コード例 #4
0
ファイル: iconpackager.py プロジェクト: emtee40/testingazuan
def getData():
    """Parse data/icon/qooxdoo.dat into a sorted list of
    [name, alternative, ...] rows.

    Exits the process (via sys.exit) when a duplicate icon name is found.

    :return: list of lists, each starting with the icon name followed by
        its alternatives
    """
    data = os.path.join(filetool.root(), os.pardir, "data", "icon",
                        "qooxdoo.dat")
    lines = filetool.read(data).split("\n")
    result = {}

    for line in lines:
        # skip blank lines, indented continuation lines and comments
        if line == "" or line.startswith(" ") or line.startswith("#"):
            continue

        if ":" in line:
            alternative = line[line.index(":") + 1:].split(",")
            key = line[:line.index(":")]
        else:
            alternative = []
            key = line

        # dict.has_key() is deprecated and removed in Python 3; "in" works
        # in both versions
        if key in result:
            console.error("Duplicate key found: %s" % key)
            sys.exit(1)

        result[key] = alternative

    # convert to a sorted array of [key, *alternatives] rows
    # (sorted() replaces keys=result.keys(); keys.sort(), which fails on Py3)
    arr = []
    for key in sorted(result):
        arr.append([key] + result[key])

    return arr
コード例 #5
0
 def toResinfo(self):
     """Collect resource info from the superclass and, for "b64" images
     with a path, append the decoded JSON content of the image file.

     NOTE(review): super(self.__class__, self) misbehaves in further
     subclasses (possible infinite recursion) -- confirm intended usage.
     """
     result = super(self.__class__, self).toResinfo()
     if self.format == "b64" and self.path:
         raw = filetool.read(self.path)
         result.append(json.loads(raw))
     return result
コード例 #6
0
 def loaderTemplate(script, compConf):
     """Return (template_content, template_path) for the loader script.

     Falls back to the generator's bundled loader.tmpl.js when the job
     config provides no "paths/loader-template" entry."""
     tmpl_path = compConf.get("paths/loader-template", None)
     if not tmpl_path:
         # use default template shipped with the generator
         tmpl_path = os.path.join(filetool.root(), os.pardir, "data",
                                  "generator", "loader.tmpl.js")
     tmpl_cont = filetool.read(tmpl_path)
     return tmpl_cont, tmpl_path
コード例 #7
0
 def toResinfo(self):
     """Collect resource info from the superclass and, for "b64" images
     with a path, append the decoded JSON content of the image file.

     NOTE(review): super(self.__class__, self) misbehaves in further
     subclasses (possible infinite recursion) -- confirm intended usage.
     """
     result = super(self.__class__, self).toResinfo()
     if self.format == "b64" and self.path:
         cont = filetool.read(self.path)
         cont = json.loads(cont)
         result.append(cont)
     return result
コード例 #8
0
ファイル: MClassCode.py プロジェクト: websales/qooxdoo
    def tree(self, treegen=treegenerator, force=False):
        """Return the unoptimized syntax tree for this class.

        The tree is looked up in the cache first; on a miss (or when
        *force* is true) the source file is tokenized, parsed, and the
        resulting tree is written back to the cache.

        :param treegen: tree generator module used for parsing
        :param force: re-parse even when a cached tree exists
        :return: syntax tree
        """
        cache = self.context['cache']
        console = self.context['console']
        tradeSpaceForSpeed = False  # Caution: setting this to True seems to make builds slower, at least on some platforms!?
        cacheId = "tree%s-%s-%s" % (treegen.tag, self.path, util.toString({}))
        self.treeId = cacheId

        # Lookup for unoptimized tree
        tree, _ = cache.read(cacheId, self.path, memory=tradeSpaceForSpeed)

        # Tree still undefined?, create it!
        if tree is None or force:  # idiom fix: identity comparison with None
            console.debug("Parsing file: %s..." % self.id)
            console.indent()

            fileContent = filetool.read(self.path, self.encoding)
            tokens = tokenizer.parseStream(fileContent, self.id)

            console.outdent()
            console.debug("Generating tree: %s..." % self.id)
            console.indent()
            tree = treegen.createSyntaxTree(tokens)  # allow exceptions to propagate

            # store unoptimized tree
            cache.write(cacheId, tree, memory=tradeSpaceForSpeed)

            console.outdent()
        return tree
コード例 #9
0
ファイル: migrator.py プロジェクト: peuter/qooxdoo
def migrateFile(filePath, compiledPatches, compiledInfos, patchFile, options=None, encoding="UTF-8"):
    """Migrate a single source file in place.

    Applies the tree-based patch module from *patchFile* (when given and
    the file has a content id), then the regex patch/info passes, and
    saves the file only if its content changed.

    :param filePath: path of the file to migrate
    :param compiledPatches: compiled regex patches (applied with regtool)
    :param compiledInfos: compiled regex info patterns (applied with regtool)
    :param patchFile: path of a Python file defining a "patch" callable
    :param options: option object; .prettyPrint is forced True for output
    :param encoding: file encoding for read and save
    """

    logging.info("  - File: %s" % filePath)

    # Read in original content
    fileContent = filetool.read(filePath, encoding)

    fileId = extractFileContentId(fileContent)

    # Apply patches
    patchedContent = fileContent

    if patchFile and fileId is not None:

        # import patch
        # (execfile is Python-2-only; executes patchFile into the dict)
        patch = {}
        execfile(patchFile, patch)
        tree = treegenerator.createFileTree(tokenizer.Tokenizer().parseStream(fileContent))

        # If there were any changes, compile the result
        if patch["patch"](fileId, tree):
            options.prettyPrint = True  # make sure it's set
            result = [u""]
            # result = pretty.prettyNode(tree, options, result)
            result = formatter_.formatNode(tree, options, result)
            patchedContent = u"".join(result)

    # apply RE patches (order matters: patches first, then infos)
    patchedContent = regtool(patchedContent, compiledPatches, True, filePath)
    patchedContent = regtool(patchedContent, compiledInfos, False, filePath)

    # Write file only when something actually changed
    if patchedContent != fileContent:
        logging.info("    - %s has been modified. Storing modifications ..." % filePath)
        filetool.save(filePath, patchedContent, encoding)
コード例 #10
0
ファイル: compile.py プロジェクト: woongsikchoi/qooxdoo
def main():
    """Command-line entry point: run the selected action (lint, pretty,
    tree, dependencies, or compile) over every file argument; a file name
    of "-" reads the source from stdin."""
    (options, args) = get_args()

    if not args:
        print(">>> Missing filename!")
        return

    for fileName in args:
        if not options.quiet:
            print(fileName, ":")
            print(">>> Parsing file...")

        if fileName == "-":
            # enables: echo "1+2" | compile.py -
            fileContent = sys.stdin.read()
        else:
            fileContent = filetool.read(fileName, "utf-8")

        if options.config:
            read_config(options)

        # pick the first requested action; compilation is the default
        if options.lint:
            action = run_lint
        elif options.pretty:
            action = run_pretty
        elif options.tree:
            action = run_tree
        elif options.dependencies:
            action = run_dependencies
        else:
            action = run_compile
        action(fileName, fileContent, options, args)

    return
コード例 #11
0
    def getMeta(self, fileId):
        """Return the dependency meta data for *fileId*, using the cache
        when it still validates against the class file.

        :param fileId: class id, key into self._classes
        :return: dict with loadtime/runtime/optional/ignore/asset deps
        """
        fileEntry = self._classes[fileId]
        filePath = fileEntry["path"]
        cacheId = "meta-%s" % fileId

        # try the cache first; readmulti is checked against filePath
        meta = self._cache.readmulti(cacheId, filePath)
        if meta != None:
            return meta

        meta = {}

        self._console.indent()

        content = filetool.read(filePath, fileEntry["encoding"])

        # extract each dependency category from the raw file content
        meta["loadtimeDeps"] = self._extractLoadtimeDeps(content, fileId)
        meta["runtimeDeps"]  = self._extractRuntimeDeps(content, fileId)
        meta["optionalDeps"] = self._extractOptionalDeps(content)
        meta["ignoreDeps"]   = self._extractIgnoreDeps(content)
        meta["assetDeps"]    = self._extractAssetDeps(content)

        self._console.outdent()

        self._cache.writemulti(cacheId, meta)
        return meta
コード例 #12
0
ファイル: MClassCode.py プロジェクト: 1and1/qooxdoo
    def tree(self, treegen=treegenerator, force=False):
        """Return the unoptimized syntax tree for this class, from cache
        or by tokenizing and parsing the source file; syntax errors are
        re-raised with the file name appended.

        NOTE(review): snippet appears truncated here (no return shown);
        Python 2 "except X, e" syntax.
        """
        cache = self.context['cache']
        console = self.context['console']
        tradeSpaceForSpeed = False  # Caution: setting this to True seems to make builds slower, at least on some platforms!?
        cacheId = "tree%s-%s-%s" % (treegen.tag, self.path, util.toString({}))
        self.treeId = cacheId

        # Lookup for unoptimized tree
        tree, _ = cache.read(cacheId, self.path, memory=tradeSpaceForSpeed)

        # Tree still undefined?, create it!
        if tree == None or force:
            console.debug("Parsing file: %s..." % self.id)
            console.indent()

            # Tokenize
            fileContent = filetool.read(self.path, self.encoding)
            fileId = self.path if self.path else self.id
            try:
                tokens = tokenizer.Tokenizer().parseStream(fileContent, self.id)
            except SyntaxException, e:
                # add file info
                e.args = (e.args[0] + "\nFile: %s" % fileId,) + e.args[1:]
                raise e

            # Parse
            try:
                tree = treegen.createFileTree(tokens, fileId)
            except SyntaxException, e:
                # add file info
                e.args = (e.args[0] + "\nFile: %s" % fileId,) + e.args[1:]
                raise
コード例 #13
0
ファイル: MClassCode.py プロジェクト: lite/empweb
    def tree(self, treegen=treegenerator, force=False):
        """Return the unoptimized syntax tree for this class, from cache
        or by tokenizing and parsing the source file; syntax errors are
        re-raised with the file name appended.

        NOTE(review): snippet appears truncated here (no return shown);
        Python 2 "except X, e" syntax.
        """
        cache = self.context['cache']
        console = self.context['console']
        tradeSpaceForSpeed = False  # Caution: setting this to True seems to make builds slower, at least on some platforms!?
        cacheId = "tree%s-%s-%s" % (treegen.tag, self.path, util.toString({}))
        self.treeId = cacheId

        # Lookup for unoptimized tree
        tree, _ = cache.read(cacheId, self.path, memory=tradeSpaceForSpeed)

        # Tree still undefined?, create it!
        if tree == None or force:
            console.debug("Parsing file: %s..." % self.id)
            console.indent()

            fileContent = filetool.read(self.path, self.encoding)
            fileId = self.path if self.path else self.id
            try:
                tokens = tokenizer.parseStream(fileContent, self.id)
            except SyntaxException, e:
                # add file info
                e.args = (e.args[0] + "\nFile: %s" % fileId, ) + e.args[1:]
                raise e

            console.outdent()
            console.debug("Generating tree: %s..." % self.id)
            console.indent()
            try:
                tree = treegen.createSyntaxTree(tokens, fileId)
            except SyntaxException, e:
                # add file info
                e.args = (e.args[0] + "\nFile: %s" % fileId, ) + e.args[1:]
                raise e
コード例 #14
0
ファイル: migrator.py プロジェクト: Wkasel/qooxdoo
def migrateFile(
                filePath, compiledPatches, compiledInfos,
                hasPatchModule=False, options=None, encoding="UTF-8"):
    """Migrate a single source file in place.

    Applies the importable "patch" module (when *hasPatchModule* is true
    and the file has a content id), then the regex patch/info passes, and
    saves the file only if its content changed.

    :param filePath: path of the file to migrate
    :param compiledPatches: compiled regex patches (applied with regtool)
    :param compiledInfos: compiled regex info patterns (applied with regtool)
    :param hasPatchModule: whether a "patch" module is importable
    :param options: option object; .prettyPrint is forced True for output
    :param encoding: file encoding for read and save
    """

    logging.info("  - File: %s" % filePath)

    # Read in original content
    fileContent = filetool.read(filePath, encoding)

    fileId = extractFileContentId(fileContent);

    # Apply patches
    patchedContent = fileContent

    if hasPatchModule and fileId is not None:

        import patch
        tree = treegenerator.createSyntaxTree(tokenizer.parseStream(fileContent))

        # If there were any changes, compile the result
        if patch.patch(fileId, tree):
            options.prettyPrint = True  # make sure it's set
            result = [u'']
            result = pretty.prettyNode(tree, options, result)
            patchedContent = u''.join(result)

    # apply RE patches (order matters: patches first, then infos)
    patchedContent = regtool(patchedContent, compiledPatches, True, filePath)
    patchedContent = regtool(patchedContent, compiledInfos, False, filePath)

    # Write file only when something actually changed
    if patchedContent != fileContent:
        logging.info("    - %s has been modified. Storing modifications ..." % filePath)
        filetool.save(filePath, patchedContent, encoding)
コード例 #15
0
ファイル: compile.py プロジェクト: simboyz/qooxdoo
def main():
    (options, args) = get_args()
    
    if len(args) == 0:
        print ">>> Missing filename!"
        return

    for fileName in args:
        if not options.quiet:
            print fileName, ":"
            print ">>> Parsing file..."
        fileContent = filetool.read(fileName, "utf-8")

        if options.config:
            read_config(options)

        if options.lint:
            run_lint(fileName, fileContent, options, args)
        elif options.pretty:
            run_pretty(fileName, fileContent, options, args)
        elif options.tree:
            run_tree(fileName, fileContent, options, args)
        elif options.dependencies:
            run_dependencies(fileName, fileContent, options, args)
        else:
            run_compile(fileName, fileContent, options, args)
         
    return
コード例 #16
0
ファイル: CodeMaintenance.py プロジェクト: AaronOpfer/qooxdoo
def runFix(jobconf, classesObj):
    """Job runner for "fix-files": normalize EOL style and whitespace in
    all known class files and strip UTF-8 BOMs.

    :param jobconf: job configuration (provides the "fix-files" map)
    :param classesObj: mapping of classId -> class entry (.path, .encoding)
    """

    def fixPng():
        # stub -- PNG fixing is not implemented yet
        return

    def removeBOM(fpath):
        # binary round-trip, since BOM handling needs the raw bytes
        content = open(fpath, "rb").read()
        if content.startswith(codecs.BOM_UTF8):
            # bugfix: message used the outer loop variable "filePath";
            # use the function's own parameter instead
            console.debug("removing BOM: %s" % fpath)
            open(fpath, "wb").write(content[len(codecs.BOM_UTF8):])
        return

    # - Main ---------------------------------------------------------------

    # only run when "fix-files" is configured as a map
    if not isinstance(jobconf.get("fix-files", False), types.DictType):
        return

    console = Context.console
    classes = classesObj.keys()
    fixsettings = ExtMap(jobconf.get("fix-files"))

    # Fixing JS source files
    console.info("Fixing whitespace in source files...")
    console.indent()

    console.info("Fixing files: ", False)
    numClasses = len(classes)
    eolStyle = fixsettings.get("eol-style", "LF")
    tabWidth = fixsettings.get("tab-width", 2)
    for pos, classId in enumerate(classes):
        console.progress(pos+1, numClasses)
        classEntry   = classesObj[classId]
        filePath     = classEntry.path
        fileEncoding = classEntry.encoding
        fileContent  = filetool.read(filePath, fileEncoding)
        # Caveat: as filetool.read already calls any2Unix, converting to LF will
        # not work as the file content appears unchanged to this function
        if eolStyle == "CR":
            fixedContent = textutil.any2Mac(fileContent)
        elif eolStyle == "CRLF":
            fixedContent = textutil.any2Dos(fileContent)
        else:
            fixedContent = textutil.any2Unix(fileContent)
        # expand tabs, strip trailing spaces, normalize remaining whitespace
        fixedContent = textutil.normalizeWhiteSpace(textutil.removeTrailingSpaces(textutil.tab2Space(fixedContent, tabWidth)))
        if fixedContent != fileContent:
            console.debug("modifying file: %s" % filePath)
            filetool.save(filePath, fixedContent, fileEncoding)
        # this has to go separate, as it requires binary operation
        removeBOM(filePath)

    console.outdent()

    # Fixing PNG files -- currently just a stub!
    if fixsettings.get("fix-png", False):
        console.info("Fixing PNGs...")
        console.indent()
        fixPng()
        console.outdent()

    return
コード例 #17
0
ファイル: MClassCode.py プロジェクト: MatiasNAmendola/meyeOS
    def _getSourceTree(self, cacheId, tradeSpaceForSpeed):
        """Return the unoptimized syntax tree for this class, reading it
        from the cache or parsing the source file on a miss.

        :param cacheId: cache key for the tree
        :param tradeSpaceForSpeed: also keep the entry in the memory cache
        :return: syntax tree
        """
        cache = self.context['cache']
        console = self.context['console']

        # Lookup for unoptimized tree
        tree, _ = cache.read(cacheId, self.path, memory=tradeSpaceForSpeed)

        # Tree still undefined?, create it!
        if tree is None:  # idiom fix: identity comparison with None
            console.debug("Parsing file: %s..." % self.id)
            console.indent()

            fileContent = filetool.read(self.path, self.encoding)
            tokens = tokenizer.parseStream(fileContent, self.id)

            console.outdent()
            console.debug("Generating tree: %s..." % self.id)
            console.indent()
            tree = treegenerator.createSyntaxTree(tokens)  # allow exceptions to propagate

            # store unoptimized tree
            cache.write(cacheId, tree, memory=tradeSpaceForSpeed, writeToFile=True)

            console.outdent()
        return tree
コード例 #18
0
ファイル: compile.py プロジェクト: RemiHeugue/qooxdoo
def main():
    """Command-line entry point: run the selected action (lint, pretty,
    tree, dependencies, or compile) over every file argument; a file name
    of "-" reads the source from stdin."""
    (options, args) = get_args()

    if len(args) == 0:
        print(">>> Missing filename!")
        return

    for fileName in args:
        if not options.quiet:
            print(fileName, ":")
            print(">>> Parsing file...")

        if fileName == "-":
            # enables: echo "1+2" | compile.py -
            fileContent = sys.stdin.read()
        else:
            fileContent = filetool.read(fileName, "utf-8")

        if options.config:
            read_config(options)

        # dispatch to the first requested action; compile is the default
        if options.lint:
            run_lint(fileName, fileContent, options, args)
        elif options.pretty:
            run_pretty(fileName, fileContent, options, args)
        elif options.tree:
            run_tree(fileName, fileContent, options, args)
        elif options.dependencies:
            run_dependencies(fileName, fileContent, options, args)
        else:
            run_compile(fileName, fileContent, options, args)

    return
コード例 #19
0
def migrateFile(
                filePath, compiledPatches, compiledInfos,
                hasPatchModule=False, options=None, encoding="UTF-8"):
    """Migrate a single source file in place.

    Applies the importable "patch" module (when *hasPatchModule* is true
    and the file has a content id), then the regex patch/info passes, and
    saves the file only if its content changed.

    :param filePath: path of the file to migrate
    :param compiledPatches: compiled regex patches (applied with regtool)
    :param compiledInfos: compiled regex info patterns (applied with regtool)
    :param hasPatchModule: whether a "patch" module is importable
    :param options: option object; .prettyPrint is forced True for output
    :param encoding: file encoding for read and save
    """

    logging.info("  - File: %s" % filePath)

    # Read in original content
    fileContent = filetool.read(filePath, encoding)

    fileId = extractFileContentId(fileContent);

    # Apply patches
    patchedContent = fileContent

    if hasPatchModule and fileId is not None:

        import patch
        tree = treegenerator.createFileTree(tokenizer.parseStream(fileContent))

        # If there were any changes, compile the result
        if patch.patch(fileId, tree):
            options.prettyPrint = True  # make sure it's set
            result = [u'']
            result = pretty.prettyNode(tree, options, result)
            patchedContent = u''.join(result)

    # apply RE patches (order matters: patches first, then infos)
    patchedContent = regtool(patchedContent, compiledPatches, True, filePath)
    patchedContent = regtool(patchedContent, compiledInfos, False, filePath)

    # Write file only when something actually changed
    if patchedContent != fileContent:
        logging.info("    - %s has been modified. Storing modifications ..." % filePath)
        filetool.save(filePath, patchedContent, encoding)
コード例 #20
0
ファイル: compile.py プロジェクト: jungsagacity/qooxdoo
def main():
    """Command-line entry point: run the selected action (lint, pretty,
    tree, dependencies, or compile) over every file argument.

    Python 2 module (print statements)."""
    (options, args) = get_args()

    if len(args) == 0:
        print ">>> Missing filename!"
        return

    for fileName in args:
        if not options.quiet:
            print fileName, ":"
            print ">>> Parsing file..."
        fileContent = filetool.read(fileName, "utf-8")

        if options.config:
            read_config(options)

        # dispatch to the first requested action; compile is the default
        if options.lint:
            run_lint(fileName, fileContent, options, args)
        elif options.pretty:
            run_pretty(fileName, fileContent, options, args)
        elif options.tree:
            run_tree(fileName, fileContent, options, args)
        elif options.dependencies:
            run_dependencies(fileName, fileContent, options, args)
        else:
            run_compile(fileName, fileContent, options, args)

    return
コード例 #21
0
ファイル: MClassCode.py プロジェクト: w495/acv
    def tree(self, treegen=treegenerator, force=False):
        """Return the unoptimized syntax tree for this class, from cache
        or by tokenizing and parsing the source file.

        :param treegen: tree generator module used for parsing
        :param force: re-parse even when a cached tree exists
        :return: syntax tree
        """
        cache = self.context["cache"]
        console = self.context["console"]
        tradeSpaceForSpeed = (
            False
        )  # Caution: setting this to True seems to make builds slower, at least on some platforms!?
        cacheId = "tree%s-%s-%s" % (treegen.tag, self.path, util.toString({}))
        self.treeId = cacheId

        # Lookup for unoptimized tree
        tree, _ = cache.read(cacheId, self.path, memory=tradeSpaceForSpeed)

        # Tree still undefined?, create it!
        if tree == None or force:
            console.debug("Parsing file: %s..." % self.id)
            console.indent()

            fileContent = filetool.read(self.path, self.encoding)
            tokens = tokenizer.parseStream(fileContent, self.id)

            console.outdent()
            console.debug("Generating tree: %s..." % self.id)
            console.indent()
            tree = treegen.createSyntaxTree(tokens)  # allow exceptions to propagate

            # store unoptimized tree
            cache.write(cacheId, tree, memory=tradeSpaceForSpeed, writeToFile=True)

            console.outdent()
        return tree
コード例 #22
0
ファイル: MClassCode.py プロジェクト: 1and1/qooxdoo
    def getCode(self, compOptions, treegen=treegenerator, featuremap={}):
        """Return this class's JS code, either the raw source or a compiled,
        optimized version, depending on *compOptions*.

        :param compOptions: compile options (.optimize, .variantset, .format)
        :param treegen: tree generator module (unused on the source path)
        :param featuremap: feature map forwarded to self.optimize
            (NOTE(review): mutable default is shared across calls -- callers
            should pass their own dict)
        :return: string with the class code
        """
        # source versions
        if not compOptions.optimize:
            compiled = filetool.read(self.path)
            # assure trailing \n (e.g. to utilise ASI)
            if compiled[-1:] != "\n":
                compiled += '\n'
        # compiled versions
        else:
            optimize  = compOptions.optimize
            variants  = compOptions.variantset
            format_   = compOptions.format
            classVariants     = self.classVariants()
            # relevantVariants is the intersection between the variant set of this job
            # and the variant keys actually used in the class
            relevantVariants  = self.projectClassVariantsToCurrent(classVariants, variants)
            variantsId        = util.toString(relevantVariants)
            optimizeId        = self._optimizeId(optimize)
            cache             = self.context["cache"]

            cacheId = "compiled-%s-%s-%s-%s" % (self.path, variantsId, optimizeId, format_)
            compiled, _ = cache.read(cacheId, self.path)

            if compiled == None:
                tree = self.optimize(None, optimize, variants, featuremap)
                compiled = self.serializeTree(tree, optimize, format_)
                # skip caching when the "statics" optimization is active
                if not "statics" in optimize:
                    cache.write(cacheId, compiled)

        return compiled
コード例 #23
0
        def loadTemplate(bootCode):
            """Read and return the loader template: the build variant when
            *bootCode* is truthy, otherwise the source variant."""
            variant = "loader-build.tmpl.js" if bootCode else "loader-source.tmpl.js"
            loaderFile = os.path.join(filetool.root(), os.pardir, "data",
                                      "generator", variant)
            return filetool.read(loaderFile)
コード例 #24
0
def generateHttpdConfig(jobconf, confObj):
    """Generate a web-server configuration file for the application.

    Reads the "web-server-config/*" job settings, fills the httpd-type
    specific template with paths/URLs for the app, and writes the
    resulting .conf file.

    :param jobconf: job configuration object
    :param confObj: global config object (used for absPath resolution)
    """
    console = Context.console
    # read config
    jconf_app_namespace = jobconf.get("let/APPLICATION")
    assert jconf_app_namespace
    jconf_conf_dir = jobconf.get("web-server-config/output-dir", ".")
    jconf_conf_dir = confObj.absPath(jconf_conf_dir)
    jconf_template_dir = jobconf.get("web-server-config/template-dir")
    assert jconf_template_dir
    jconf_httpd_type = jobconf.get("web-server-config/httpd-type", "apache2")
    jconf_httpd_hosturl = jobconf.get("web-server-config/httpd-host-url",
                                      "http://localhost")

    # make sure library manifests are loaded
    libs = jobconf.get("library", [])
    for lib in libs:
        lib._init_from_manifest()

    config_path = os.path.join(jconf_conf_dir, jconf_httpd_type + ".conf")
    template_path = os.path.join(jconf_template_dir,
                                 "httpd." + jconf_httpd_type + ".tmpl.conf")
    alias_path = jconf_app_namespace.replace(".", "/")

    # collect config values (template macros)
    value_map = {
        "APP_HTTPD_CONFIG": "",
        "LOCALHOST_APP_URL": "",
        "APP_NAMESPACE_AS_PATH": "",
        "APP_DOCUMENT_ROOT": "",
    }

    value_map['APP_HTTPD_CONFIG'] = config_path

    doc_root = jobconf.get("web-server-server/document-root",
                           "") or get_doc_root(jobconf, confObj)
    doc_root = os.path.normpath(
        confObj.absPath(doc_root))  # important to normpath() coz '\' vs. '/'
    value_map['APP_DOCUMENT_ROOT'] = ensure_trailing_slash(doc_root)

    app_web_path = from_doc_root_to_app_root(jobconf, confObj, doc_root)
    value_map['LOCALHOST_APP_URL'] = "/".join(
        (jconf_httpd_hosturl, alias_path, app_web_path))

    value_map['APP_NAMESPACE_AS_PATH'] = alias_path

    # load httpd-specific template
    config_templ = filetool.read(template_path)
    # replace macros
    config_templ = string.Template(config_templ)
    config = config_templ.safe_substitute(value_map)
    # write .conf file
    console.info("Writing configuration file for '%s': '%s'" %
                 (jconf_httpd_type, config_path))
    filetool.save(config_path, config)
    console.info(
        "See the file's comments how to integrate it with the web server configuration"
    )
    console.info("Then open your source application with '%s'" %
                 value_map['LOCALHOST_APP_URL'])
コード例 #25
0
ファイル: ecmalint.py プロジェクト: StephanHoyer/DJsAdmin
    def __init__(self, filename, logger=None):
        """Read *filename*, build its syntax tree and Script wrapper.

        :param filename: path of the JS file to lint
        :param logger: optional logger; a ConsoleLogger is created when
            none (or a falsy value) is supplied
        """
        self.filename = filename
        source = filetool.read(filename)

        tokens = tokenizer.parseStream(source)
        self.tree = treegenerator.createSyntaxTree(tokens)
        self.script = Script(self.tree, self.filename)
        self.logger = logger if logger else ConsoleLogger()
コード例 #26
0
    def getPackageApi(self, packageId):
        """Return the API doc node for *packageId*, or None (after a debug
        message) when no package docs are registered."""
        if packageId not in self._docs:
            self._console.debug("Missing package docs: %s" % packageId)
            return None

        entry = self._docs[packageId]
        text = filetool.read(entry["path"])
        return api.createPackageDoc(text, packageId)
コード例 #27
0
    def __init__(self, filename, logger=None):
        """Read *filename*, build its syntax tree and Script wrapper.

        :param filename: path of the JS file to lint
        :param logger: optional logger; a ConsoleLogger is created when
            none (or a falsy value) is supplied
        """
        self.filename = filename
        content = filetool.read(filename)

        self.tree = treegenerator.createSyntaxTree(
            tokenizer.parseStream(content))
        self.script = Script(self.tree, self.filename)
        if not logger:
            self.logger = ConsoleLogger()
        else:
            self.logger = logger
コード例 #28
0
 def getPackageApi(self, packageId):
     """Return the API doc node for *packageId*, or None (after a debug
     message) when no package docs are registered."""
     if not packageId in self._docs:
         self._console.debug("Missing package docs: %s" % packageId)
         return None

     packageEntry = self._docs[packageId]

     text = filetool.read(packageEntry["path"])
     node = api.createPackageDoc(text, packageId)

     return node
コード例 #29
0
ファイル: ApiLoader.py プロジェクト: lite/empweb
 def getPackageApi(self, packageId):
     """Return the API doc node for *packageId*.

     Returns None when docs are missing; warns only for non-empty ids
     (the empty root namespace is silently ignored)."""
     if not packageId in self._docs:
         if packageId:  # don't complain empty root namespace
             self._console.warn("Missing package docs: %s" % packageId)
         return None

     packageEntry = self._docs[packageId]

     text = filetool.read(packageEntry["path"])
     node = api.createPackageDoc(text, packageId)

     return node
コード例 #30
0
 def getCode(self, compile_options=None, variants=None, source_with_comments=False):
     """Return the class code: the raw source (optionally stripped of
     comments) when no compile options are given, otherwise the compiled
     output of the optimized tree."""
     if not compile_options:
         # source versions
         code = filetool.read(self.path)
         if not source_with_comments:
             code = strip_comments(code)
     else:
         # compiled versions
         tree = self.optimize(self.ast, compile_options, variants)
         code = compiler.compile(tree)
     return code
コード例 #31
0
 def parseMetaFile(self, path):
     """Load the .meta file accompanying *path* into a dict, re-raising
     read/parse errors with the .meta file name prepended to the message.

     NOTE(review): snippet appears truncated (fontDict unused here);
     Python 2 "except X, e" syntax.
     """
     # Read the .meta file
     # it doesn't seem worth to apply caching here
     meta_fname = os.path.splitext(path)[0] + '.meta'
     try:
         meta_content = filetool.read(meta_fname)
         fontDict = json.loads(meta_content)
     except Exception, e:
         msg = "Reading of .meta file failed: '%s'" % meta_fname + (
             "\n%s" % e.args[0] if e.args else "")
         e.args = (msg, ) + e.args[1:]
         raise
コード例 #32
0
 def toResinfo(self):
     """Collect resource info; for "b64" images with a path, append the
     decoded JSON file content, re-raising read/parse errors with the
     image path prepended to the message.

     NOTE(review): snippet appears truncated (no return shown);
     Python 2 "except X, e" syntax.
     """
     result = super(self.__class__, self).toResinfo()
     if self.format == "b64" and self.path:
         try:
             cont = filetool.read(self.path)
             cont = json.loads(cont)
         except Exception, e:
             msg = "Reading of b64 image file failed: '%s'" % self.path + (
                 "\n%s" % e.args[0] if e.args else "")
             e.args = (msg, ) + e.args[1:]
             raise
         else:
             result.append(cont)
コード例 #33
0
ファイル: MClassCode.py プロジェクト: MatiasNAmendola/meyeOS
    def getCode(self, compOptions):
        """Return source or compiled code for this class depending on
        compOptions.optimize; plain source always gets a trailing newline."""
        if compOptions.optimize:
            # compiled versions
            return self._getCompiled(compOptions)

        # source versions
        code = filetool.read(self.path)
        if not code.endswith("\n"):  # assure trailing \n
            code += '\n'
        return code
コード例 #34
0
ファイル: FontMap.py プロジェクト: RemiHeugue/qooxdoo
 def parseMetaFile(self, path):
     """Load the .meta file accompanying *path* into a dict, re-raising
     read/parse errors with the .meta file name prepended to the message.

     NOTE(review): snippet appears truncated (fontDict unused here);
     Python 2 "except X, e" syntax.
     """
     # Read the .meta file
     # it doesn't seem worth to apply caching here
     meta_fname   = os.path.splitext(path)[0]+'.meta'
     try:
         meta_content = filetool.read(meta_fname)
         fontDict      = json.loads(meta_content)
     except Exception, e:
         msg = "Reading of .meta file failed: '%s'" % meta_fname + (
             "\n%s" % e.args[0] if e.args else ""
             )
         e.args = (msg,) + e.args[1:]
         raise
コード例 #35
0
ファイル: CombinedImage.py プロジェクト: RemiHeugue/qooxdoo
 def toResinfo(self):
     """Collect resource info; for "b64" images with a path, append the
     decoded JSON file content, re-raising read/parse errors with the
     image path prepended to the message.

     NOTE(review): snippet appears truncated (no return shown);
     Python 2 "except X, e" syntax.
     """
     result = super(self.__class__, self).toResinfo()
     if self.format == "b64" and self.path:
         try:
             cont = filetool.read(self.path)
             cont = json.loads(cont)
         except Exception, e:
             msg = "Reading of b64 image file failed: '%s'" % self.path + (
                 "\n%s" % e.args[0] if e.args else ""
                 )
             e.args = (msg,) + e.args[1:]
             raise
         else:
             result.append(cont)
コード例 #36
0
 def getPackageApi(self, packageId):
     """Return the API doc node for *packageId*, or None (after a debug
     message) when no package docs are registered.

     Non-JS doc files are wrapped in comment markers so the doc parser
     accepts them."""
     # dict.has_key() is deprecated (removed in Python 3); use "in"
     if packageId not in self._docs:
         self._console.debug("Missing package docs: %s" % packageId)
         return None

     packageEntry = self._docs[packageId]

     text = filetool.read(packageEntry["path"])
     # Add surrounding comment markers for non-javascript files
     if not packageEntry["path"].endswith(".js"):
         text = "/*\n" + text + "\n*/"
     node = api.createPackageDoc(text, packageId)

     return node
コード例 #37
0
ファイル: MiniWebServer.py プロジェクト: 6r1d/qooxdoo
def generateHttpdConfig(jobconf, confObj):
    """Generate a web-server configuration file from the job's settings.

    Reads the "web-server-config/*" job keys, fills the httpd-specific
    template with the computed values, and writes the resulting .conf
    file to the configured output directory.
    """
    console = Context.console

    # read config
    app_namespace = jobconf.get("let/APPLICATION")
    assert app_namespace
    conf_dir = confObj.absPath(jobconf.get("web-server-config/output-dir", "."))
    template_dir = jobconf.get("web-server-config/template-dir")
    assert template_dir
    httpd_type = jobconf.get("web-server-config/httpd-type", "apache2")
    httpd_hosturl = jobconf.get("web-server-config/httpd-host-url", "http://localhost")

    libs = jobconf.get("library", [])
    assert libs
    for lib in libs:
        lib._init_from_manifest()

    config_path = os.path.join(conf_dir, httpd_type + ".conf")
    template_path = os.path.join(template_dir, "httpd." + httpd_type + ".tmpl.conf")
    alias_path = app_namespace.replace(".", "/")

    # collect config values
    doc_root = get_doc_root(jobconf, confObj)
    app_web_path = from_doc_root_to_app_root(jobconf, confObj, doc_root)
    value_map = {
        "APP_HTTPD_CONFIG"      : config_path,
        "LOCALHOST_APP_URL"     : "/".join((httpd_hosturl, alias_path, app_web_path)),
        "APP_NAMESPACE_AS_PATH" : alias_path,
        "APP_DOCUMENT_ROOT"     : ensure_trailing_slash(doc_root),
    }

    # load httpd-specific template and replace macros
    config = string.Template(filetool.read(template_path)).safe_substitute(value_map)

    # write .conf file
    console.info("Writing configuration file for '%s': '%s'" % (httpd_type, config_path))
    filetool.save(config_path, config)
    console.info("See the file's comments how to integrate it with the web server configuration")
    console.info("Then open your source application with '%s'" % value_map['LOCALHOST_APP_URL'])
コード例 #38
0
    def parseMetaFile(self, path):
        """Populate self.embeds from the JSON '.meta' file next to *path*.

        The .meta file shares *path*'s base name with a '.meta' suffix and
        maps each image id to its spec (presumably [width, height, type,
        combinedUri, off-x, off-y] -- not verified here).
        """
        # caching is not applied here; the file is small enough to parse directly
        metaPath = os.path.splitext(path)[0] + '.meta'
        imageSpecs = json.loads(filetool.read(metaPath))

        # register one Image object per entry of the .meta file
        for imgId, spec in imageSpecs.items():
            img = Image()
            img.id = imgId
            self.embeds.append(img.fromMeta(spec))
        return
コード例 #39
0
    def parseMetaFile(self, path):
        """Populate self.embeds with Image objects built from the JSON
        '.meta' file accompanying *path* (same base name, '.meta' suffix)."""
        # Read the .meta file
        # it doesn't seem worth to apply caching here
        meta_fname = os.path.splitext(path)[0] + '.meta'
        meta_content = filetool.read(meta_fname)
        imgDict = json.loads(meta_content)

        # Loop through the images of the .meta file
        for imageId, imageSpec_ in imgDict.items():
            # sort of like this: imageId : [width, height, type, combinedUri, off-x, off-y]
            imageObject = Image()
            imageObject.id = imageId
            imageObject = imageObject.fromMeta(imageSpec_)
            self.embeds.append(imageObject)
        return
コード例 #40
0
        def get_hint_meta():
            # Collect the dependency-hint metadata for the current class
            # file; relies on closure variables (filePath, fileEntry,
            # fileId, console, _extract* helpers) from the enclosing scope.
            # (Truncated in this view: console.indent() has no visible
            # matching outdent, and meta is presumably returned below.)
            meta = {}

            console.indent()

            content = filetool.read(filePath, fileEntry.encoding)

            meta["loadtimeDeps"] = _extractLoadtimeDeps(content, fileId)
            meta["runtimeDeps"] = _extractRuntimeDeps(content, fileId)
            meta["optionalDeps"] = _extractOptionalDeps(content)
            meta["ignoreDeps"] = _extractIgnoreDeps(content)
            try:
                meta["assetDeps"] = _extractAssetDeps(content)
            except ValueError, e:
                # add the offending file path to the message, then re-raise
                e.args = (e.args[0] + u' in: %r' % filePath, ) + e.args[1:]
                raise e
コード例 #41
0
            def loadTemplate(bootCode):
                # Return the loader template text: a custom template from
                # "paths/loader-template" when configured, else the generic
                # default template shipped with the generator.
                # NOTE(review): the bootCode parameter is unused in this body.
                # try custom loader templates
                loaderFile = compConf.get("paths/loader-template", None)
                if not loaderFile:
                    # use default templates
                    if version=="build":
                        #loaderFile = os.path.join(filetool.root(), os.pardir, "data", "generator", "loader-build.tmpl.js")
                        # TODO: test-wise using generic template
                        loaderFile = os.path.join(filetool.root(), os.pardir, "data", "generator", "loader.tmpl.js")
                    else:
                        #loaderFile = os.path.join(filetool.root(), os.pardir, "data", "generator", "loader-source.tmpl.js")
                        loaderFile = os.path.join(filetool.root(), os.pardir, "data", "generator", "loader.tmpl.js")

                template = filetool.read(loaderFile)

                return template
コード例 #42
0
ファイル: MClassHints.py プロジェクト: RemiHeugue/qooxdoo
        def get_hint_meta():
            # Build the hint/metadata dict for the current class file.
            # Uses closure variables (filePath, fileEntry, fileId, console,
            # _extract* helpers) from the enclosing method.
            # (Truncated in this view: meta is presumably cached/returned below.)
            meta = {}

            console.indent()

            content = filetool.read(filePath, fileEntry.encoding)

            meta["loadtimeDeps"] = _extractLoadtimeDeps(content, fileId)
            meta["runtimeDeps"]  = _extractRuntimeDeps(content, fileId)
            meta["optionalDeps"] = _extractOptionalDeps(content)
            meta["ignoreDeps"]   = _extractIgnoreDeps(content)
            try:
                meta["assetDeps"]    = _extractAssetDeps(content)
            except ValueError, e:
                # add the offending file path to the message, then re-raise
                e.args = (e.args[0] + u' in: %r' % filePath,) + e.args[1:]
                raise e
コード例 #43
0
    def parseMetaFile(self, path):
        """Register the embedded images listed in the JSON '.meta' file
        next to *path* (same base name, '.meta' suffix) in self.embeds,
        keyed by image id."""
        # Read the .meta file
        # it doesn't seem worth to apply caching here
        meta_fname   = os.path.splitext(path)[0]+'.meta'
        meta_content = filetool.read(meta_fname)
        imgDict      = json.loads(meta_content)

        # Loop through the images of the .meta file
        for imageId, imageSpec_ in imgDict.items():
            self._console.debug("found embedded image: %r" % imageId)
            # sort of like this: imagePath : [width, height, type, combinedUri, off-x, off-y]

            imageObject = ImgInfoFmt(imageSpec_) # turn this into an ImgInfoFmt object, to abstract from representation in .meta file
            self.embeds[imageId] = imageObject

        return
コード例 #44
0
            def loadTemplate(bootCode):
                # Return the loader template text: a custom template from
                # "paths/loader-template" when configured, else the generic
                # default template shipped with the generator.
                # NOTE(review): the bootCode parameter is unused in this body.
                # try custom loader templates
                loaderFile = compConf.get("paths/loader-template", None)
                if not loaderFile:
                    # use default templates
                    if version=="build":
                        #loaderFile = os.path.join(filetool.root(), os.pardir, "data", "generator", "loader-build.tmpl.js")
                        # TODO: test-wise using generic template
                        loaderFile = os.path.join(filetool.root(), os.pardir, "data", "generator", "loader.tmpl.js")
                    else:
                        #loaderFile = os.path.join(filetool.root(), os.pardir, "data", "generator", "loader-source.tmpl.js")
                        loaderFile = os.path.join(filetool.root(), os.pardir, "data", "generator", "loader.tmpl.js")

                template = filetool.read(loaderFile)

                return template
コード例 #45
0
    def getTree(self, fileId, variants=None):
        """Return the (possibly variant-optimized) syntax tree for *fileId*.

        Trees are cached on disk (and optionally in memory). The
        unoptimized tree is cached under its own key so variant-specific
        trees can be derived from it without re-parsing the source.
        """
        fileEntry = self._classes[fileId]
        filePath = fileEntry["path"]

        # the cache key includes the variant set when one is given
        if variants:
            cacheId = "tree-%s-%s" % (filePath, util.toString(variants))
        else:
            cacheId = "tree-%s" % filePath

        tradeSpaceForSpeed = False  # Caution: setting this to True seems to make builds slower, at least on some platforms!?

        tree = self._cache.read(cacheId, filePath, memory=tradeSpaceForSpeed)
        if tree != None:
            return tree

        # Lookup for unoptimized tree
        if variants != None:
            tree = self._cache.read("tree-%s" % fileId, filePath, memory=tradeSpaceForSpeed)

        # Tree still undefined?, create it!
        if tree == None:
            self._console.debug("Parsing file: %s..." % fileId)
            self._console.indent()

            fileContent = filetool.read(fileEntry["path"], fileEntry["encoding"])
            tokens = tokenizer.parseStream(fileContent, fileId)
            
            self._console.outdent()
            self._console.debug("Generating tree: %s..." % fileId)
            self._console.indent()
            tree = treegenerator.createSyntaxTree(tokens)  # allow exceptions to propagate

            # store unoptimized tree
            self._cache.write("tree-%s" % fileId, tree, memory=tradeSpaceForSpeed, writeToFile=True)

            self._console.outdent()

        # Call variant optimizer
        if variants != None:
            self._console.debug("Selecting variants: %s..." % fileId)
            self._console.indent()
            variantoptimizer.search(tree, variants, fileId)
            self._console.outdent()
            # store optimized tree
            self._cache.write(cacheId, tree, memory=tradeSpaceForSpeed, writeToFile=True)

        return tree
コード例 #46
0
ファイル: Migrator.py プロジェクト: emtee40/testingazuan
    def readPatchInfoFiles(self, baseDir):
        """
        Reads all patch/info files from a directory and compiles the containing
        regular expressions.
        Returns a list of compiled REs (the output of entryCompiler).
        """
        patchList = []
        emptyLine = re.compile("^\s*$")

        for root, dirs, files in os.walk(baseDir):

            # Filter ignored directories
            for ignoredDir in [".svn", "CVS"]:
                if ignoredDir in dirs:
                    dirs.remove(ignoredDir)

            # Searching for files
            for fileName in files:
                filePath = os.path.join(root, fileName)

                # normalize line endings before splitting into lines
                fileContent = textutil.any2Unix(
                    filetool.read(filePath, "utf-8"))
                patchList.append({
                    "path": filePath,
                    "content": fileContent.split("\n")
                })

                logging.debug("    - %s" % filePath)

        logging.debug("    - Compiling expressions...")

        compiledPatches = []

        for patchFile in patchList:
            logging.debug("      - %s" % os.path.basename(patchFile["path"]))
            for line in patchFile["content"]:
                # skip blank lines and '#' / '//' comment lines
                if emptyLine.match(line) or line.startswith(
                        "#") or line.startswith("//"):
                    continue

                compiled = self.entryCompiler(line)
                if compiled != None:
                    compiledPatches.append(compiled)

        return compiledPatches
コード例 #47
0
ファイル: loader.py プロジェクト: StephanHoyer/DJsAdmin
def getTokens(fileDb, fileId, options):
    """Return (and memoize in fileDb) the token stream for *fileId*.

    Tokens are loaded from the pickle cache directory when it is
    configured and up to date; otherwise the source file is tokenized
    and the cache is refreshed.
    """
    if not fileDb[fileId].has_key("tokens"):
        if options.verbose:
            print "    - Generating tokens for %s..." % fileId

        useCache = False
        loadCache = False

        fileEntry = fileDb[fileId]

        filePath = fileEntry["path"]
        fileEncoding = fileEntry["encoding"]

        # decide whether a token cache file exists and is still fresh
        if options.cacheDirectory != None:
            cachePath = os.path.join(filetool.normalize(options.cacheDirectory), fileId + "-tokens.pcl")
            useCache = True

            if not filetool.checkCache(filePath, cachePath):
                loadCache = True

        if loadCache:
            tokens = filetool.readCache(cachePath)
        else:
            fileContent = filetool.read(filePath, fileEncoding)

            # TODO: This hack is neccesary because the current parser cannot handle comments
            #       without a context.
            if fileDb[fileId]["meta"]:
                fileContent += "\n(function() {})()"

            tokens = tokenizer.parseStream(fileContent, fileId)

            if useCache:
                if options.verbose:
                    print "    - Caching tokens for %s..." % fileId

                filetool.storeCache(cachePath, tokens)

        fileDb[fileId]["tokens"] = tokens

    return fileDb[fileId]["tokens"]
コード例 #48
0
ファイル: migrator.py プロジェクト: Wkasel/qooxdoo
def readPatchInfoFiles(baseDir):
    """
    Read all patch/info files below *baseDir* and compile the regular
    expressions they contain.
    Returns a list of compiled entries (the output of entryCompiler).
    """
    emptyLine = re.compile(r"^\s*$")
    patchList = []

    for root, dirs, files in os.walk(baseDir):

        # prune VCS bookkeeping directories from the walk
        for vcsDir in (".svn", "CVS"):
            if vcsDir in dirs:
                dirs.remove(vcsDir)

        # collect every remaining file, normalized to Unix line endings
        for name in files:
            path = os.path.join(root, name)
            content = textutil.any2Unix(filetool.read(path, "utf-8"))
            patchList.append({"path": path, "content": content.split("\n")})
            logging.debug("    - %s" % path)

    logging.debug("    - Compiling expressions...")

    compiledPatches = []
    for patchFile in patchList:
        logging.debug("      - %s" % os.path.basename(patchFile["path"]))
        for line in patchFile["content"]:
            # skip blank lines and '#' / '//' comment lines
            if emptyLine.match(line) or line.startswith("#") or line.startswith("//"):
                continue
            entry = entryCompiler(line)
            if entry != None:
                compiledPatches.append(entry)

    return compiledPatches
コード例 #49
0
    def getTree(self, fileId, variants=None):
        """Return the syntax tree for *fileId*, using the cache when fresh.

        (Truncated in this view: the block ends right after entering the
        variant-selection phase.)
        """
        fileEntry = self._classes[fileId]
        filePath = fileEntry["path"]

        # the cache key includes the variant set when one is given
        if variants:
            cacheId = "tree-%s-%s" % (fileId, idlist.toString(variants))
        else:
            cacheId = "tree-%s" % fileId

        tree = self._cache.read(cacheId, filePath)
        if tree != None:
            return tree

        # Lookup for unoptimized tree
        if variants != None:
            tree = self._cache.read("tree-%s" % fileId, filePath)

        # Tree still undefined?, create it!
        if tree == None:
            self._console.debug("Parsing file: %s..." % fileId)
            self._console.indent()

            fileEntry = self._classes[fileId]
            fileContent = filetool.read(fileEntry["path"], fileEntry["encoding"])
            tokens = tokenizer.parseStream(fileContent, fileId)
            
            self._console.outdent()
            self._console.debug("Generating tree: %s..." % fileId)
            self._console.indent()

            # a syntax error aborts the whole program with a console message
            try:
                tree = treegenerator.createSyntaxTree(tokens)
            except treegenerator.SyntaxException, detail:
                self._console.error("%s" % detail)
                sys.exit(1)

            self._console.outdent()
            self._console.debug("Selecting variants: %s..." % fileId)
            self._console.indent()
コード例 #50
0
ファイル: svninfo.py プロジェクト: woongsikchoi/qooxdoo
def query(path):
    """Return (revision, folder) for the svn working copy at *path*.

    Tries the old XML-style '.svn/entries' format (svn <= 1.3) first,
    then falls back to the newer plain-text format (svn 1.4).
    (Truncated in this view: behavior when nothing matches is not
    visible here.)
    """
    if os.path.exists(path):
        entries = os.path.join(path, ".svn", "entries")

        if os.path.exists(entries):

            # old (svn 1.3) XML style format
            try:
                tree = ElementTree.parse(entries)
                for entry in tree.findall("{svn:}entry"):
                    revision = entry.get("revision")
                    url = entry.get("url")
                    if revision != None and url != None:
                        url = url.split("/")

                        # assumes a .../{trunk|tags|branches}/... URL layout
                        # with the folder at index 5 -- TODO confirm
                        folder = url[5]
                        if folder in ["tags", "branches"]:
                            folder = url[6]

                        return revision, folder
                        #return revision
            except Exception, e:
                # not XML (or malformed) -- fall through to the 1.4 parser
                pass

            # new (svn 1.4) file format
            content = filetool.read(entries)

            mtch = DIRINFO.search(content)
            if mtch:
                folder = mtch.group(2)
                if folder in ["tags", "branches"]:
                    folder = mtch.group(3)

                revision = mtch.group(1)

                return revision, folder
コード例 #51
0
def indexFile(filePath, filePathId, classPath, listIndex, classEncoding, classUri, resourceInput, resourceOutput, options, fileDb={}, moduleDb={}):
    """Parse one class file and register it in fileDb and moduleDb.

    Either loads the pre-extracted entry from the pickle cache (when
    options.cacheDirectory is set and the cache is fresh) or extracts
    dependency/resource/module information from the file content.

    NOTE(review): fileDb/moduleDb are mutable default arguments -- calls
    that omit them share one module-level dict across all invocations.
    Verify whether callers rely on that accumulation before changing it.
    """

    ########################################
    # Checking cache
    ########################################

    useCache = False
    loadCache = False
    cachePath = None

    if options.cacheDirectory != None:
        cachePath = os.path.join(filetool.normalize(options.cacheDirectory), filePathId + "-entry.pcl")
        useCache = True

        if not filetool.checkCache(filePath, cachePath):
            loadCache = True



    ########################################
    # Loading file content / cache
    ########################################

    if loadCache:
        fileEntry = filetool.readCache(cachePath)
        fileId = filePathId

    else:
        fileContent = filetool.read(filePath, classEncoding)

        # Extract ID
        fileContentId = extractFileContentId(fileContent)

        # Search for valid ID
        if fileContentId == None:
            if not filePathId.endswith("__init__"):
                print "    - Could not extract ID from file: %s. Fallback to path %s!" % (filePath, filePathId)
            fileId = filePathId

        else:
            fileId = fileContentId

        if fileId != filePathId:
            print "    - ID mismatch: CONTENT=%s != PATH=%s" % (fileContentId, filePathId)
            if not options.migrateSource:
                sys.exit(1)

        fileEntry = {
            "autoDependencies" : False,
            "cached" : False,
            "cachePath" : cachePath,
            "meta" : fileId.endswith("__init__"),
            "ignoreDeps" : extractIgnore(fileContent, fileId),
            "optionalDeps" : extractOptional(fileContent, fileId),
            "loadtimeDeps" : extractLoadtimeDeps(fileContent, fileId),
            "runtimeDeps" : extractRuntimeDeps(fileContent, fileId),
            "resources" : extractResources(fileContent, fileId),
            "embeds" : extractEmbeds(fileContent, fileId),
            "modules" : extractModules(fileContent, fileId)
        }



    ########################################
    # Additional data
    ########################################

    # We don't want to cache these items
    fileEntry["path"] = filePath
    fileEntry["pathId"] = filePathId
    fileEntry["encoding"] = classEncoding
    fileEntry["resourceInput"] = resourceInput
    fileEntry["resourceOutput"] = resourceOutput
    fileEntry["classUri"] = classUri
    fileEntry["listIndex"] = listIndex
    fileEntry["classPath"] = classPath


    ########################################
    # Registering file
    ########################################

    # Register to file database
    fileDb[fileId] = fileEntry

    # Register to module database
    for moduleId in fileEntry["modules"]:
        if moduleId in moduleDb:
            moduleDb[moduleId].append(fileId)
        else:
            moduleDb[moduleId] = [fileId]
コード例 #52
0
    def getHints(self, metatype=""):
        """Return the hint metadata of this class (deps, assets, cldr, ...).

        Served from the multi-value cache under 'meta-<path>' when
        available. If *metatype* is given, only that entry is returned.
        (Truncated in this view: the caching/return of a freshly computed
        meta dict is not visible here.)
        """
        # -- hint extraction helpers; each scans the raw file content via
        #    the pre-compiled patterns in self.HEAD --

        def _extractLoadtimeDeps(data, fileId):
            # entries matched by the 'require' pattern; self-reference raises
            deps = []

            for item in self.HEAD["require"].findall(data):
                if item == fileId:
                    raise NameError("Self-referring load dependency: %s" %
                                    item)
                else:
                    deps.append(item)

            return deps

        def _extractRuntimeDeps(data, fileId):
            # entries matched by the 'use' pattern; self-reference only logged
            deps = []

            for item in self.HEAD["use"].findall(data):
                if item == fileId:
                    console.error("Self-referring runtime dependency: %s" %
                                  item)
                else:
                    deps.append(item)

            return deps

        def _extractOptionalDeps(data):
            # 'optional' entries, deduplicated in order of appearance
            deps = []

            # Adding explicit requirements
            for item in self.HEAD["optional"].findall(data):
                if not item in deps:
                    deps.append(item)

            return deps

        def _extractIgnoreDeps(data):
            # 'ignore' entries, deduplicated in order of appearance
            ignores = []

            # Adding explicit requirements
            for item in self.HEAD["ignore"].findall(data):
                if not item in ignores:
                    ignores.append(item)

            return ignores

        def _extractAssetDeps(data):
            # 'asset' entries, validated against a path-pattern regex
            deps = []
            #asset_reg = re.compile("^[\$\.\*a-zA-Z0-9/{}_-]+$")
            asset_reg = re.compile(
                r"^[\$\.\*\w/{}-]+$", re.U
            )  # have to include "-", which is permissible in paths, e.g. "folder-open.png"

            for item in self.HEAD["asset"].findall(data):
                if not asset_reg.match(item):
                    raise ValueError, "Illegal asset declaration: %s" % item
                if not item in deps:
                    deps.append(item)

            return deps

        def _extractCLDRDeps(data):
            # flag list: [True] when any 'cldr' entry is present
            cldr = []

            # Adding explicit requirements
            if self.HEAD["cldr"].findall(data):
                cldr = [True]

            return cldr

        def _extractUnknownDeps(data):
            # hint keys found in the source that are not in the known set
            unknown_keys = []
            known_keys = [x for x in self.HEAD if x != "_unknown_"]

            # here, i'm interested in the key rather than the value
            for item in self.HEAD["_unknown_"].findall(data):
                if item in known_keys:
                    continue
                elif item not in unknown_keys:
                    unknown_keys.append(item)

            return unknown_keys

        # ----------------------------------------------------------

        fileEntry = self
        filePath = fileEntry.path
        fileId = self.id
        cacheId = "meta-%s" % filePath
        cache = self.context['cache']
        console = self.context['console']

        # fast path: serve from the cache when an entry exists
        meta, _ = cache.readmulti(cacheId, filePath)
        if meta != None:
            if metatype:
                return meta[metatype]
            else:
                return meta

        meta = {}

        console.indent()

        content = filetool.read(filePath, fileEntry.encoding)

        meta["loadtimeDeps"] = _extractLoadtimeDeps(content, fileId)
        meta["runtimeDeps"] = _extractRuntimeDeps(content, fileId)
        meta["optionalDeps"] = _extractOptionalDeps(content)
        meta["ignoreDeps"] = _extractIgnoreDeps(content)
        try:
            meta["assetDeps"] = _extractAssetDeps(content)
        except ValueError, e:
            # add the offending file path to the message, then re-raise
            e.args = (e.args[0] + u' in: %r' % filePath, ) + e.args[1:]
            raise e
コード例 #53
0
def runFix(jobconf, classesObj):
    """Normalize whitespace/EOLs of class source files (job key "fix-files").

    Iterates over all known classes, normalizes line endings (LF/CR/CRLF
    per config), expands tabs, strips trailing spaces, and removes UTF-8
    BOMs. PNG fixing is configured via "fix-png" but currently a no-op.
    """
    def fixPng():
        # stub -- PNG fixing is not implemented yet
        return

    def removeBOM(fpath):
        # Binary-level pass: strip a leading UTF-8 byte-order mark if
        # present (cannot be folded into the textual fixes, which operate
        # on decoded text).
        with open(fpath, "rb") as f:
            content = f.read()
        if content.startswith(codecs.BOM_UTF8):
            # fix: log this function's own argument, not the enclosing
            # loop's filePath closure variable
            console.debug("removing BOM: %s" % fpath)
            with open(fpath, "wb") as f:
                f.write(content[len(codecs.BOM_UTF8):])
        return

    # - Main ---------------------------------------------------------------

    # only run when the job provides a "fix-files" map
    if not isinstance(jobconf.get("fix-files", False), types.DictType):
        return

    console = Context.console
    classes = classesObj.keys()
    fixsettings = ExtMap(jobconf.get("fix-files"))

    # Fixing JS source files
    console.info("Fixing whitespace in source files...")
    console.indent()

    console.info("Fixing files: ", False)
    numClasses = len(classes)
    eolStyle = fixsettings.get("eol-style", "LF")
    tabWidth = fixsettings.get("tab-width", 2)
    for pos, classId in enumerate(classes):
        console.progress(pos + 1, numClasses)
        classEntry = classesObj[classId]
        filePath = classEntry.path
        fileEncoding = classEntry.encoding
        fileContent = filetool.read(filePath, fileEncoding)
        # Caveat: as filetool.read already calls any2Unix, converting to LF will
        # not work as the file content appears unchanged to this function
        if eolStyle == "CR":
            fixedContent = textutil.any2Mac(fileContent)
        elif eolStyle == "CRLF":
            fixedContent = textutil.any2Dos(fileContent)
        else:
            fixedContent = textutil.any2Unix(fileContent)
        fixedContent = textutil.normalizeWhiteSpace(
            textutil.removeTrailingSpaces(
                textutil.tab2Space(fixedContent, tabWidth)))
        # only rewrite files that actually changed
        if fixedContent != fileContent:
            console.debug("modifying file: %s" % filePath)
            filetool.save(filePath, fixedContent, fileEncoding)
        # this has to go separate, as it requires binary operation
        removeBOM(filePath)

    console.outdent()

    # Fixing PNG files -- currently just a stub!
    if fixsettings.get("fix-png", False):
        console.info("Fixing PNGs...")
        console.indent()
        fixPng()
        console.outdent()

    return
コード例 #54
0
ファイル: tokenizer.py プロジェクト: imsoftware/qooxdoo
##
# Remove whitespace at the beginning of subsequent lines in a multiline text
# (usually comment).
LeadingSpace = re.compile('\A\s+', re.U)


def alignMultiLines(text, firstColumn):
    """Shift continuation lines left by the first line's indentation.

    Lines after the first that are indented at least as deeply as
    *firstColumn* lose that common indent; shallower lines are kept
    unchanged. The first line is never touched.
    """
    baseIndent = firstColumn - 1  # columns start with 1
    outLines = []
    for lineNo, line in enumerate(text.split('\n')):
        match = LeadingSpace.search(line)
        # only touch continuation lines indented at least as much as line 1
        if lineNo and match and len(match.group()) >= baseIndent:
            line = LeadingSpace.sub(' ' * (len(match.group()) - baseIndent),
                                    line)
        outLines.append(line)
    return '\n'.join(outLines)


if __name__ == "__main__":
    from misc import filetool
    # Ad-hoc CLI: tokenize the file given as first argument and dump the
    # resulting token stream (Python 2 print statement).
    if len(sys.argv) > 1:
        fname = sys.argv[1]
        text = filetool.read(fname)
        toks = parseStream(text)
        for tok in toks:
            print tok
コード例 #55
0
        #print repr(res)
    elif x == b:
        res = block()
        print res.toXml()
        #print repr(res)
    else:
        raise RuntimeError("Wrong test parameter: %s" % x)


if __name__ == "__main__":
    from ecmascript.frontend import tokenizer
    # Ad-hoc CLI: parse the file (or escaped literal string) given as the
    # first argument and print the syntax tree as XML; with no argument,
    # run the compiler test suite (Python 2 print/execfile).
    if len(sys.argv)>1:
        arg1 = sys.argv[1]
        p = TreeGenerator()
        if os.path.isfile(arg1):
            text = filetool.read(sys.argv[1])
        else:
            text = arg1.decode('unicode_escape')  # 'string_escape' would work too
        tokenArr = tokenizer.parseStream(text)
        print p.parse(tokenArr).toXml()
    else:
        #execfile (os.path.normpath(os.path.join(os.environ["QOOXDOO_PATH"], "tool/test/compiler/treegenerator.py")))
        execfile (os.path.normpath(os.path.join(__file__, "../../../../test/compiler/treegenerator.py"))) # __file__ doesn't seem to work in pydb
        for t in tests:
            test(*t)




##
# A plan for the new parser:
コード例 #56
0
    def _scanClassPath(self, path, uri, encoding):
        """Walk the class folder at *path* and register classes and docs.

        Fills self._classes (keyed by dotted path id) and self._docs
        (keyed by package id). Aborts the program when the folder is
        missing or when a file's class name conflicts with its path.
        """
        if not os.path.exists(path):
            self._console.error(
                "The given path does not contains a class folder: %s" % path)
            sys.exit(1)

        self._console.debug("Scanning class folder...")

        # Iterate...
        for root, dirs, files in os.walk(path):
            # Filter ignored directories
            for ignoredDir in self._ignoredDirectories:
                if ignoredDir in dirs:
                    dirs.remove(ignoredDir)

            # Searching for files
            for fileName in files:
                # Ignore dot files
                if fileName.startswith("."):
                    continue

                # Process path data
                filePath = os.path.join(root, fileName)
                fileRel = filePath.replace(path + os.sep, "")
                fileExt = os.path.splitext(fileName)[-1]

                # Compute full URI from relative path
                fileUri = uri + "/" + fileRel.replace(os.sep, "/")

                # Compute identifier from relative path
                filePathId = fileRel.replace(fileExt, "").replace(os.sep, ".")

                # Extract package ID
                filePackage = filePathId[:filePathId.rfind(".")]

                # Handle doc files
                # (removed the unused local 'fileFor'; it duplicated filePackage)
                if fileName == self._docFilename:
                    self._docs[filePackage] = {
                        "relpath": fileRel,
                        "path": filePath,
                        "encoding": encoding,
                        "namespace": self._namespace,
                        "id": filePathId,
                        "package": filePackage
                    }

                    # Stop further processing
                    continue

                # Ignore non-script
                if os.path.splitext(fileName)[-1] != ".js":
                    continue

                # Read content
                fileContent = filetool.read(filePath, encoding)

                # Extract code ID (e.g. class name, mixin name, ...)
                fileCodeId = self._getCodeId(fileContent)

                # Ignore all data files (e.g. translation, doc files, ...)
                if fileCodeId == None:
                    continue

                # Compare path and content
                if fileCodeId != filePathId:
                    self._console.error(
                        "Detected conflict between filename and classname!")
                    self._console.indent()
                    self._console.error("Classname: %s" % fileCodeId)
                    self._console.error("Path: %s" % fileRel)
                    self._console.outdent()
                    sys.exit(1)

                # Store file data
                self._classes[filePathId] = {
                    "relpath": fileRel,
                    "path": filePath,
                    "uri": fileUri,
                    "encoding": encoding,
                    "namespace": self._namespace,
                    "id": filePathId,
                    "package": filePackage
                }

        self._console.indent()
        self._console.debug("Found %s classes" % len(self._classes))
        self._console.debug("Found %s docs" % len(self._docs))
        self._console.outdent()
コード例 #57
0
def main():
    parser = optparse.OptionParser(option_class=ExtendAction)

    usage_str = '''%prog [options] file.js,...'''
    parser.set_usage(usage_str)

    # General flags
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      dest="verbose",
                      default=False,
                      help="verbose output mode (extra verbose)")
    parser.add_option("-q",
                      "--quiet",
                      action="store_true",
                      dest="quiet",
                      default=False,
                      help="quiet output")

    # Optimization flags
    parser.add_option("-n",
                      "--variables",
                      action="store_true",
                      dest="variables",
                      default=False,
                      help="optimize variables")
    parser.add_option("-s",
                      "--strings",
                      action="store_true",
                      dest="strings",
                      default=False,
                      help="optimize strings")
    parser.add_option("-p",
                      "--privates",
                      action="store_true",
                      dest="privates",
                      default=False,
                      help="optimize privates")
    parser.add_option("-b",
                      "--basecalls",
                      action="store_true",
                      dest="basecalls",
                      default=False,
                      help="optimize basecalls")
    parser.add_option("-i",
                      "--inline",
                      action="store_true",
                      dest="inline",
                      default=False,
                      help="optimize inline")
    parser.add_option("--all",
                      action="store_true",
                      dest="all",
                      default=False,
                      help="optimize all")

    # Variant support
    parser.add_option("--variant",
                      action="extend",
                      dest="variants",
                      metavar="KEY:VALUE",
                      type="string",
                      default=[],
                      help="Selected variants")

    # Action modifier
    parser.add_option("--pretty",
                      action="store_true",
                      dest="pretty",
                      default=False,
                      help="print out pretty printed")
    parser.add_option("--tree",
                      action="store_true",
                      dest="tree",
                      default=False,
                      help="print out tree")
    parser.add_option("--lint",
                      action="store_true",
                      dest="lint",
                      default=False,
                      help="ecmalint the file")

    # Cache support
    parser.add_option("-c",
                      "--cache",
                      dest="cache",
                      metavar="CACHEPATH",
                      type="string",
                      default="",
                      help="path to cache directory")
    parser.add_option("--privateskey",
                      dest="privateskey",
                      metavar="CACHEKEY",
                      type="string",
                      default="",
                      help="cache key for privates")

    #
    # Process arguments
    #
    (options, args) = parser.parse_args(sys.argv[1:])

    if len(args) == 0:
        print ">>> Missing filename!"
        return

    if not options.quiet:
        print ">>> Parsing file..."
    fileName = args[0]
    fileContent = filetool.read(fileName, "utf-8")
    fileId = "xxx"
    tokens = tokenizer.parseStream(fileContent, fileName)

    if not options.quiet:
        print ">>> Creating tree..."
    tree = treegenerator.createSyntaxTree(tokens)

    #
    # Optimizing tree
    #

    if len(options.variants) > 0:
        if not options.quiet:
            print ">>> Selecting variants..."
        varmap = {}
        for entry in options.variants:
            pos = entry.index(":")
            varmap[entry[0:pos]] = entry[pos + 1:]

        variantoptimizer.search(tree, varmap, fileId)

    if options.all or options.basecalls:
        if not options.quiet:
            print ">>> Optimizing basecalls..."
        basecalloptimizer.patch(tree)

    if options.all or options.inline:
        if not options.quiet:
            print ">>> Optimizing inline..."
        inlineoptimizer.patch(tree)

    if options.all or options.strings:
        if not options.quiet:
            print ">>> Optimizing strings..."
        _optimizeStrings(tree, fileId)

    if options.all or options.variables:
        if not options.quiet:
            print ">>> Optimizing variables..."
        variableoptimizer.search(tree)

    if options.all or options.privates:
        if not options.quiet:
            print ">>> Optimizing privates..."
        if options.cache:
            cache = Cache(options.cache, Log())
            privates, _ = cache.read(options.privateskey)
            if privates != None:
                privateoptimizer.load(privates)
        privateoptimizer.patch(tree, fileId)
        if options.cache:
            cache.write(options.privateskey, privateoptimizer.get())

    #
    # Output the result
    #

    if options.lint:
        if not options.quiet:
            print ">>> Executing ecmalint..."
        print "Needs implementation"

    elif options.tree:
        if not options.quiet:
            print ">>> Printing out tree..."
        print tree.toXml().encode('utf-8')

    else:
        if not options.quiet:
            print ">>> Compiling..."
        compiled = _compileTree(tree, options.pretty)
        print compiled.encode('utf-8')
コード例 #58
0
    def _scanClassPath(self, timeOfLastScan=0):
        """Scan this library's class path and collect its classes.

        Walks the file tree below the Manifest's namespace root, skipping
        ignored and non-.js entries. Classes already known from a previous
        scan and not modified since *timeOfLastScan* are re-used; everything
        else gets a fresh Class (or qcEnvClass) object. Package documentation
        files (self._docFilename) are collected into a separate dict.

        :param timeOfLastScan: epoch seconds of the previous scan; files with
            an older mtime are re-used from self._classes instead of re-read.
        :raises ConfigurationError: if the Manifest's class path is missing.
        :raises ValueError: if the Manifest namespace has no directory, or a
            class file cannot be parsed for its code id.
        :raises RuntimeError: if a file's class name conflicts with its path.
        """

        codeIdFromTree = True  # switch between regex- and tree-based codeId search

        # Check class path
        classPath = os.path.join(self.path, self.classPath)
        if not os.path.isdir(classPath):
            raise ConfigurationError(
                "Class path from Manifest doesn't exist: %s" % self.classPath)

        # Check multiple namespaces: exactly one non-hidden top-level
        # directory is expected; extra entries only produce a warning.
        if not len([d for d in os.listdir(classPath) if not d.startswith(".")
                    ]) == 1:
            self._console.warn(
                "The class path must contain exactly one namespace; ignoring everything else: '%s'"
                % (classPath, ))

        # Check Manifest namespace matches file system
        nsPrefix = self.namespace.replace(".", os.sep)
        classNSRoot = os.path.join(classPath, nsPrefix)
        if not os.path.isdir(classNSRoot):
            raise ValueError(
                "Manifest namespace does not exist on file system:  '%s'" %
                (classNSRoot))

        self._console.debug("Scanning class folder...")

        classList = []
        # Map classId -> Class object from a previous scan, for cheap re-use.
        existClassIds = dict([(x.id, x)
                              for x in self._classes])  # if we scanned before
        docs = {}

        # TODO: Clazz still relies on a context dict!
        contextdict = {}
        contextdict["console"] = context.console
        contextdict["cache"] = context.cache
        contextdict["jobconf"] = context.jobconf
        contextdict["envchecksmap"] = {}

        # Iterate...
        for root, dirs, files in filetool.walk(classNSRoot):
            # Filter ignored directories (removing in place prunes the walk)
            for ignoredDir in dirs:
                if self._ignoredDirEntries.match(ignoredDir):
                    dirs.remove(ignoredDir)

            # Add good directories
            currNameSpace = root[len(classNSRoot + os.sep):]
            currNameSpace = currNameSpace.replace(os.sep,
                                                  ".")  # TODO: var name

            # Searching for files
            for fileName in files:
                # Ignore dot files
                if fileName.startswith(".") or self._ignoredDirEntries.match(
                        fileName):
                    continue
                self._console.dot()

                # Process path data
                filePath = os.path.join(root, fileName)
                fileRel = filePath.replace(
                    classNSRoot + os.sep,
                    "")  # now only path fragment *afte* NS
                fileExt = os.path.splitext(fileName)[-1]
                fileStat = os.stat(filePath)
                fileSize = fileStat.st_size
                fileMTime = fileStat.st_mtime

                # Compute full URI from relative path
                fileUri = self.classUri + "/" + fileRel.replace(os.sep, "/")

                # Compute identifier from relative path
                filePathId = fileRel.replace(fileExt, "").replace(os.sep, ".")
                filePathId = self.namespace + "." + filePathId  # e.g. "qx.core.Environment"
                filePathId = unidata.normalize(
                    "NFC", filePathId)  # combine combining chars: o" -> ö
                fileId = nsPrefix + "/" + fileRel  # e.g. "qx/core/Environment.js"

                # check if known and fresh: skip re-parsing unchanged files
                if (filePathId in existClassIds
                        and fileMTime < timeOfLastScan):
                    classList.append(existClassIds[filePathId])
                    #print "re-using existing", filePathId
                    continue  # re-use known class

                # Extract package ID
                filePackage = filePathId[:filePathId.rfind(".")]

                # Handle doc files
                if fileName == self._docFilename:
                    fileFor = filePathId[:filePathId.rfind(".")]
                    docs[filePackage] = {
                        "relpath": fileId,
                        "path": filePath,
                        "encoding": self.encoding,
                        "namespace": self.namespace,
                        "id": filePathId,
                        "package": filePackage,
                        "size": fileSize
                    }

                    # Stop further processing
                    continue

                # Ignore non-script
                if os.path.splitext(fileName)[-1] != ".js":
                    continue

                # qx.core.Environment gets a dedicated Class subclass
                if filePathId == "qx.core.Environment":
                    clazz = qcEnvClass(filePathId, filePath, self, contextdict)
                else:
                    clazz = Class(filePathId, filePath, self, contextdict)

                # Extract code ID (e.g. class name, mixin name, ...)
                try:
                    if codeIdFromTree:
                        fileCodeId = self._getCodeId(clazz)
                    else:
                        # Read content
                        fileContent = filetool.read(filePath, self.encoding)
                        fileCodeId = self._getCodeId1(fileContent)
                except ValueError, e:
                    # Re-raise with the offending file name appended to the
                    # first exception argument for a usable error message.
                    argsList = []
                    for arg in e.args:
                        argsList.append(arg)
                    argsList[0] = argsList[0] + u' (%s)' % fileName
                    e.args = tuple(argsList)
                    raise e

                # Ignore all data files (e.g. translation, doc files, ...)
                if fileCodeId == None:
                    continue

                # Compare path and content
                if fileCodeId != filePathId:
                    self._console.error(
                        "Detected conflict between filename and classname!")
                    self._console.indent()
                    self._console.error("Classname: %s" % fileCodeId)
                    self._console.error("Path: %s" % filePath)
                    self._console.outdent()
                    raise RuntimeError()

                # Store file data; attributes consumed by other subsystems
                self._console.debug("Adding class %s" % filePathId)
                clazz.encoding = self.encoding
                clazz.size = fileSize  # dependency logging uses this
                clazz.package = filePackage  # Apiloader uses this
                clazz.relpath = fileId  # Locale uses this
                clazz.m_time_ = fileStat.st_mtime
                classList.append(clazz)
                # NOTE(review): the visible snippet ends here without a
                # return statement; the continuation (presumably returning
                # classList/docs) is not shown — confirm against the original.
コード例 #59
0
    def _scanClassPath(self, path, uri, encoding):
        if not os.path.exists(path):
            raise ValueError("The given class path does not exist: %s" % path)

        self._console.debug("Scanning class folder...")

        classList = {}
        docs = {}

        # Iterate...
        for root, dirs, files in filetool.walk(path):
            # Filter ignored directories
            for ignoredDir in dirs:
                if self._ignoredDirectories.match(ignoredDir):
                    dirs.remove(ignoredDir)

            # Add good directories
            currNameSpace = root[len(path + os.sep):]
            currNameSpace = currNameSpace.replace(os.sep, ".")

            # Searching for files
            for fileName in files:
                # Ignore dot files
                if fileName.startswith("."):
                    continue

                # Process path data
                filePath = os.path.join(root, fileName)
                fileRel = filePath.replace(path + os.sep, "")
                fileExt = os.path.splitext(fileName)[-1]
                fileSize = os.stat(filePath).st_size

                # Compute full URI from relative path
                fileUri = uri + "/" + fileRel.replace(os.sep, "/")

                # Compute identifier from relative path
                filePathId = fileRel.replace(fileExt, "").replace(os.sep, ".")

                # Extract package ID
                filePackage = filePathId[:filePathId.rfind(".")]

                # Handle doc files
                if fileName == self._docFilename:
                    fileFor = filePathId[:filePathId.rfind(".")]
                    docs[filePackage] = {
                        "relpath": fileRel,
                        "path": filePath,
                        "encoding": encoding,
                        "namespace": self.namespace,
                        "id": filePathId,
                        "package": filePackage,
                        "size": fileSize
                    }

                    # Stop further processing
                    continue

                # Ignore non-script
                if os.path.splitext(fileName)[-1] != ".js":
                    continue

                # Read content
                fileContent = filetool.read(filePath, encoding)

                # Extract code ID (e.g. class name, mixin name, ...)
                try:
                    fileCodeId = self._getCodeId(fileContent)
                except ValueError, e:
                    e.args[0] = e.args[0] + u' (%s)' % fileName
                    raise e

                # Ignore all data files (e.g. translation, doc files, ...)
                if fileCodeId == None:
                    continue

                # Compare path and content
                if fileCodeId != filePathId:
                    self._console.error(
                        "Detected conflict between filename and classname!")
                    self._console.indent()
                    self._console.error("Classname: %s" % fileCodeId)
                    self._console.error("filePathId: %s" % filePathId)
                    self._console.error("Path: %s" % fileRel)
                    self._console.outdent()
                    raise RuntimeError()

                # Store file data
                self._console.debug("Adding class %s" % filePathId)
                classList[filePathId] = {
                    "relpath": fileRel,
                    "path": filePath,
                    "encoding": encoding,
                    "namespace": self.namespace,
                    "id": filePathId,
                    "package": filePackage,
                    "size": fileSize
                }
                # TODO: Clazz still relies on a context dict!
                contextdict = {}
                contextdict["console"] = context.console
                contextdict["cache"] = context.cache
                contextdict["jobconf"] = context.jobconf
                # TODO: currently creation of throw-away objects (unless they're .append'ed)
                clazz = Class(classList[filePathId], filePath, self,
                              contextdict, self._classesObj)
                clazz.encoding = encoding
                clazz.size = fileSize  # dependency logging uses this
                clazz.package = filePackage  # Apiloader uses this