Example #1
    def depsToFlareFile(classDepsIter, depsLogConf):
        data = {}
        for (packageId, classId, depId, loadOrRun) in classDepsIter:
            if classId not in data:
                data[classId] = {}
                data[classId]['name'] = classId
                data[classId]["size"] = 1000
                data[classId]["imports"] = []

            if loadOrRun == 'load':
                data[classId]['imports'].append(depId)

        output = []
        for cid in data.keys():
            output.append(data[cid])

        file = depsLogConf.get('flare/file', "flare.json")
        console.info("Writing dependency data to file: %s" % file)
        pretty = depsLogConf.get('flare/pretty', None)
        if pretty:
            indent = 2
            separators = (', ', ': ')
        else:
            indent = None
            separators = (',', ':')
        filetool.save(file, json.dumps(output, sort_keys=True, indent=indent, separators=separators))

        return
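For reference, Flare output is a flat list of nodes, one per class, with load-time dependencies under "imports". A minimal sketch of both output branches, using two invented class names:

import json

# qx.ui.Button load-depends on qx.core.Object (invented example data)
output = [
    {"name": "qx.core.Object", "size": 1000, "imports": []},
    {"name": "qx.ui.Button", "size": 1000, "imports": ["qx.core.Object"]},
]
# 'flare/pretty' branch: human-readable output
print(json.dumps(output, sort_keys=True, indent=2, separators=(', ', ': ')))
# default branch: the most compact encoding json.dumps offers
print(json.dumps(output, sort_keys=True, indent=None, separators=(',', ':')))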
Example #2
    def _expandString(self, s, mapstr, mapbin):
        assert isinstance(s, types.StringTypes)
        if not Key.hasMacro(s):  # optimization: no macro -> return
            return s
        macro = ""
        sub   = ""
        possiblyBin = re.match(r'^\${(.*)}$', s)   # look for '${...}' as a bin replacement
        if possiblyBin:
            macro = possiblyBin.group(1)

        if macro and (macro in mapbin.keys()):
            replval = mapbin[macro]
            if isinstance(replval, types.DictType):
                sub = copy.deepcopy(replval)  # make sure macro values are not affected during value merges later
            else:
                sub = replval   # array references are ok for now
        else:
            templ = string.Template(s)
            # allow stringified value of bin macros to be spliced into result value
            mapall = mapstr.copy()
            mapall.update(dict((k,json.dumps(v)) for (k,v) in mapbin.items()))
            try:
                sub = templ.substitute(mapall)
            except KeyError, e:
                raise ValueError("Macro left undefined in job (%s): '%s'\n(might be from an included config)" % (self.name, e.args[0]))
        return sub
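The two branches behave quite differently: a string that is exactly one ${...} macro with a "bin" (structured) value is replaced wholesale, keeping its type, while anything else is treated as a string.Template into which stringified values are spliced. A sketch with invented maps:

import copy
import json
import string

mapstr = {"APPLICATION": "demoapp"}
mapbin = {"LOCALES": ["en", "de"]}

# Whole-string bin macro: the structured value is returned as such
# (dicts are deep-copied so later value merges cannot corrupt them).
print(copy.deepcopy(mapbin["LOCALES"]))                # ['en', 'de']

# Mixed string: bin values are spliced in as their JSON representation.
mapall = mapstr.copy()
mapall.update(dict((k, json.dumps(v)) for (k, v) in mapbin.items()))
print(string.Template("${APPLICATION}: ${LOCALES}").substitute(mapall))
# demoapp: ["en", "de"]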
Example #3
    def depsToJsonFile(classDepsIter, depsLogConf):
        data = {}
        for (packageId, classId, depId, loadOrRun) in classDepsIter:
            if classId not in data:
                data[classId] = {}
                data[classId]["load"] = []
                data[classId]["run"] = []

            data[classId][loadOrRun].append(depId)

        file = depsLogConf.get('json/file', "deps.json")
        console.info("Writing dependency data to file: %s" % file)
        pretty = depsLogConf.get('json/pretty', None)
        if pretty:
            indent = 2
            separators = (', ', ': ')
        else:
            indent = None
            separators = (',', ':')
        filetool.save(
            file,
            json.dumps(data,
                       sort_keys=True,
                       indent=indent,
                       separators=separators))

        return
Example #4
    def depsToFlareFile(classDepsIter, depsLogConf):
        data = {}
        for (packageId, classId, depId, loadOrRun) in classDepsIter:
            if classId not in data:
                data[classId] = {}
                data[classId]['name'] = classId
                data[classId]["size"] = 1000
                data[classId]["imports"] = []

            if loadOrRun == 'load':
                data[classId]['imports'].append(depId)

        output = []
        for cid in data.keys():
            output.append(data[cid])

        file = depsLogConf.get('flare/file', "flare.json")
        console.info("Writing dependency data to file: %s" % file)
        pretty = depsLogConf.get('flare/pretty', None)
        if pretty:
            indent = 2
            separators = (', ', ': ')
        else:
            indent = None
            separators = (',', ':')
        filetool.save(
            file,
            json.dumps(output,
                       sort_keys=True,
                       indent=indent,
                       separators=separators))

        return
Example #5
File: Job.py Project: ryanmar/next
    def _expandString(self, s, mapstr, mapbin):
        assert isinstance(s, types.StringTypes)
        if not Key.hasMacro(s):  # optimization: no macro -> return
            return s
        macro = ""
        sub = ""
        possiblyBin = re.match(r'^\${(.*)}$',
                               s)  # look for '${...}' as a bin replacement
        if possiblyBin:
            macro = possiblyBin.group(1)

        if macro and (macro in mapbin.keys()):
            replval = mapbin[macro]
            if isinstance(replval, types.DictType):
                sub = copy.deepcopy(
                    replval
                )  # make sure macro values are not affected during value merges later
            else:
                sub = replval  # array references are ok for now
        else:
            templ = string.Template(s)
            # allow stringified value of bin macros to be spliced into result value
            mapall = mapstr.copy()
            mapall.update(dict(
                (k, json.dumps(v)) for (k, v) in mapbin.items()))
            try:
                sub = templ.substitute(mapall)
            except KeyError, e:
                raise ValueError(
                    "Macro left undefined in job (%s): '%s'\n(might be from an included config)"
                    % (self.name, e.args[0]))
        return sub
Example #6
        def _getCompileCommand(clazz, variants, optimize, format_):

            def getToolBinPath():
                path = sys.argv[0]
                path = os.path.abspath(os.path.normpath(os.path.dirname(path)))
                return path

            m   = {}
            cmd = ""
            toolBinPath      = getToolBinPath()
            m['compilePath'] = os.path.join(toolBinPath, "compile.py -q")
            m['filePath']    = os.path.normpath(clazz.path)
            # optimizations
            optis = []
            for opti in optimize:
                optis.append("--" + opti)
            m['optimizations'] = " ".join(optis)
            # variants
            varis = []
            for vari in variants:
                varis.append("--variant=" + vari + ":" + json.dumps(variants[vari]))
            m['variants'] = " ".join(varis)
            m['cache'] = "-c " + self._cache._path  # Cache needs context object, interrupt handler,...
            # compile.py could read the next from privateoptimizer module directly
            m['privateskey'] = "--privateskey " + '"' + privateoptimizer.privatesCacheId + '"'

            cmd = "%(compilePath)s %(optimizations)s %(variants)s %(cache)s %(privateskey)s %(filePath)s" % m
            return cmd
Example #7
def runFontMap(jobconf, confObj):

    if not jobconf.get("font-map", False):
        return

    console = Context.console
    cache = Context.cache

    # Test for fontforge
    try:
        import fontforge
    except ImportError:
        console.error(
            "Font map generation is not possible: fontforge is missing")
        return

    console.info("Generating font map...")
    console.indent()

    done = []

    fonts = jobconf.get("font-map/fonts", {})
    for font, fontspec in fonts.iteritems():
        alias = fontspec["alias"] or font.fontfamily

        if alias in done:
            continue
        done.append(alias)

        config = {
            "alias": alias or font.fontfamily,
            "size": fontspec["size"] or 48,
            "mapping": {}
        }

        fo = fontforge.open(font)

        for glyph in fo:
            go = fo[glyph]
            if go.unicode > 0:
                config["mapping"][go.glyphname] = [
                    go.unicode,
                    round(go.width / float(go.vwidth), 3)
                ]

        # store meta data for this font
        bname = os.path.basename(font)
        ri = bname.rfind('.')
        if ri > -1:
            bname = bname[:ri]
        bname += '.meta'
        meta_fname = os.path.join(os.path.dirname(font), bname)
        console.debug("writing meta file %s" % meta_fname)
        filetool.save(meta_fname,
                      json.dumps(config, ensure_ascii=False, sort_keys=True))

    console.outdent()
    return
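Each glyph entry pairs the codepoint with the width ratio, and the metadata lands in a .meta file next to the font (foo.ttf becomes foo.meta). A sketch of the structure written out, with invented glyph data (note that font in the loop above is the file path, so the font.fontfamily fallback looks questionable; in practice fontspec["alias"] presumably supplies the alias):

import json

# glyph name -> [unicode codepoint, width/vwidth rounded to 3 places]
config = {
    "alias": "FontAwesome",     # invented values
    "size": 48,
    "mapping": {"heart": [61444, 0.875], "star": [61445, 0.875]},
}
print(json.dumps(config, ensure_ascii=False, sort_keys=True))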
Example #8
def _send_message_raw(dst, data):
    amqp_message = amqp.Message(json.dumps(data))
    params = connection_settings()
    with amqp.Connection(**params) as conn:
        with conn.channel() as chan:
            amqp_message.properties[
                'delivery_mode'] = 2  # message is persistent
            chan.basic_publish(amqp_message,
                               settings.AMQP_EXCHANGE_NAME,
                               routing_key=dst,
                               mandatory=True)
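Setting delivery_mode to 2 marks the message persistent, so it survives a broker restart (provided the queue it lands in is durable). A hypothetical call, with an invented routing key:

_send_message_raw("billing.events", {"event": "invoice_paid", "id": 42})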
Example #9
def runFontMap(jobconf, confObj):

    if not jobconf.get("font-map", False):
        return

    console = Context.console
    cache = Context.cache

    # Test for fontforge
    try:
        import fontforge
    except ImportError:
        console.error("Font map generation is not possible: fontforge is missing")
        return

    console.info("Generating font map...")
    console.indent()

    done = []

    fonts = jobconf.get("font-map/fonts", {})
    for font, fontspec in fonts.iteritems():
        alias = fontspec["alias"] or font.fontfamily

        if alias in done:
            continue
        done.append(alias)

        config = {
          "alias" : alias or font.fontfamily,
          "size" : fontspec["size"] or 48,
          "mapping" : {}
        }

        fo = fontforge.open(font)

        for glyph in fo:
            go = fo[glyph]
            if go.unicode > 0:
                config["mapping"][go.glyphname] = [go.unicode, round(go.width / float(go.vwidth), 3)]

        # store meta data for this font
        bname = os.path.basename(font)
        ri = bname.rfind('.')
        if ri > -1:
            bname = bname[:ri]
        bname += '.meta'
        meta_fname = os.path.join(os.path.dirname(font), bname)
        console.debug("writing meta file %s" % meta_fname)
        filetool.save(meta_fname, json.dumps(config, ensure_ascii=False, sort_keys=True))

    console.outdent()
    return
Example #10
def main():
    apidata = {}
    apidata['type'] = 'doctree'
    apidata['children'] = []
    apidata['attributes'] = {}
    apidata['children'].append({
        "type": "packages",
        "attributes": {},
        "children": []
    })
    filetool.directory(store_path)

    dirwalker = filetool.find(module_root, r'\.py$')

    for pyfile in dirwalker:
        #if os.stat(pyfile).st_size == 0:
        #    continue
        # get the file's api doc as json
        filejson = pyapi2json(pyfile)
        apipackage = file2package(pyfile, module_root)
        # and store it
        filetool.save(store_path + '/' + apipackage + '.json', filejson)
        # make an entry in apidata struct
        levels = apipackage.split('.')
        curr = apidata['children'][0]['children']
        for pos, level in enumerate(levels):
            if level not in (x['attributes']['name'] for x in curr
                             if 'name' in x['attributes']):
                newentry = {
                    "children": [],
                    "type": "packages" if pos % 2 else "package",
                    "attributes": {
                        "packageName": ".".join(levels[:pos]),
                        "name": level,
                        "fullName": ".".join(levels[:pos + 1])
                    }
                }
                if pos == len(levels) - 1:
                    newentry["externalRef"] = True
                    #del newentry['children']
                    #newentry["type"] = "classes"
                    pass
                curr.append(newentry)
                curr = newentry['children']
            else:
                curr = [
                    x['children'] for x in curr
                    if x['attributes']['name'] == level
                ][0]

    # store apidata
    filetool.save(store_path + '/' + "apidata.json", json.dumps(apidata))
Example #11
        def enabler(request, *args, **kwargs):
            try:
                retval = view(request, *args, **kwargs)
                if isinstance(retval, basestring):
                    return api_ok(retval)
                elif hasattr(retval, 'items'):
                    return api_ok(json.dumps(retval))
                else:
                    raise ValueError("Expected dict-like or string, got '%s'" %
                                     type(retval))

            except BaseException, e:
                log_exception()
                return api_exception(e)
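enabler is the inner wrapper of what appears to be a view decorator: plain strings pass straight to api_ok, dict-likes are JSON-encoded first, and anything else raises. A hypothetical wrapped view (the decorator name and response helpers are assumed from the surrounding module):

@json_api                                  # assumed decorator around enabler
def status_view(request):
    return {"status": "ok", "pending": 3}  # dict-like -> json.dumps -> api_ok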
Example #12
def main():
    apidata = {}
    apidata['type'] = 'doctree'
    apidata['children'] = []
    apidata['attributes'] = {}
    apidata['children'].append({
      "type":"packages","attributes":{},"children":[]  
    })
    filetool.directory(store_path)

    dirwalker = filetool.find(module_root, r'\.py$')

    for pyfile in dirwalker:
        #if os.stat(pyfile).st_size == 0:
        #    continue
        # get the file's api doc as json
        filejson = pyapi2json(pyfile)
        apipackage = file2package(pyfile, module_root)
        # and store it
        filetool.save(store_path+'/'+apipackage+'.json', filejson)
        # make an entry in apidata struct
        levels = apipackage.split('.')
        curr = apidata['children'][0]['children']
        for pos,level in enumerate(levels):
            if level not in (x['attributes']['name'] for x in curr if 'name' in x['attributes']):
                newentry = {
                    "children" : [],
                    "type" : "packages" if pos % 2 else "package",
                    "attributes" : {
                        "packageName" : ".".join(levels[:pos]),
                        "name" : level,
                        "fullName" : ".".join(levels[:pos+1])
                    }
                }
                if pos==len(levels)-1:
                    newentry["externalRef"] = True
                    #del newentry['children']
                    #newentry["type"] = "classes"
                    pass
                curr.append(newentry)
                curr = newentry['children']
            else:
                curr = [x['children'] for x in curr if x['attributes']['name']==level][0]
        

    # store apidata
    filetool.save(store_path+'/'+"apidata.json", json.dumps(apidata))
Example #13
def runSimulation(jobconf):
    console = Context.console
    console.info("Running Simulation...")

    argv    = []
    javaBin = "java"
    javaClassPath = "-cp"
    argv.extend((javaBin, javaClassPath))

    configClassPath = jobconf.get("simulate/java-classpath", [])
    qxSeleniumPath = jobconf.get("simulate/qxselenium-path", False)
    if qxSeleniumPath:
        configClassPath.append(qxSeleniumPath)

    classPathSeparator = ":"
    if util.getPlatformInfo()[0] == "Windows":
        classPathSeparator = ";"

    configClassPath = classPathSeparator.join(configClassPath)

    if "CYGWIN" in util.getPlatformInfo()[0]:
        configClassPath = "`cygpath -wp " + configClassPath + "`"

    argv.append(configClassPath)

    rhinoClass = jobconf.get("simulate/rhino-class", "org.mozilla.javascript.tools.shell.Main")
    runnerScript = jobconf.get("simulate/simulator-script")
    argv.extend((rhinoClass, runnerScript))

    cmd = " ".join(textutil.quoteCommandArgs(argv))

    settings = jobconf.get("environment", None)
    for key in settings:
        if type(settings[key]) == unicode:
            settings[key] = settings[key].replace(" ", "$")
    if settings:
        settings = json.dumps(settings, separators=(",", ":"))
        settings = settings.replace('"','\\"').replace("{", "\{").replace("}", "\}")
        settings = "settings=" + settings
        cmd += " " + settings

    console.debug("Selenium start command: " + cmd)
    shell = ShellCmd()
    shell.execute_logged(cmd, console, True)
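The escaping at the end protects the JSON from the shell: quotes and braces are backslashed so Rhino receives the settings= blob intact. What it produces for a small dict (sketch):

import json

settings = {"qx.debug": True}                    # invented setting
s = json.dumps(settings, separators=(",", ":"))  # {"qx.debug":true}
s = s.replace('"', '\\"').replace("{", "\\{").replace("}", "\\}")
print("settings=" + s)                           # settings=\{\"qx.debug\":true\}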
Example #14
        def printVariantInfo(variantSetNum, variants, variantSets, variantData):
            if len(variantSets) < 2:  # only log when more than 1 set
                return
            variantStr = json.dumps(variants,ensure_ascii=False)
            self._console.head("Processing variant set %s/%s" % (variantSetNum+1, len(variantSets)))

            # Debug variant combination
            hasVariants = False
            for key in variants:
                if len(variantData[key]) > 1:
                    hasVariants = True

            if hasVariants:
                self._console.info("Switched variants:")
                self._console.indent()
                for key in variants:
                    if len(variantData[key]) > 1:
                        self._console.info("%s = %s" % (key, variants[key]))
                self._console.outdent()

            return
Example #15
    def depsToJsonFile(classDepsIter, depsLogConf):
        data = {}
        for (packageId, classId, depId, loadOrRun) in classDepsIter:
            if classId not in data:
                data[classId] = {}
                data[classId]["load"] = []
                data[classId]["run"] = []

            data[classId][loadOrRun].append(depId)

        file = depsLogConf.get('json/file', "deps.json")
        console.info("Writing dependency data to file: %s" % file)
        pretty = depsLogConf.get('json/pretty', None)
        if pretty:
            indent     = 2
            separators = (', ', ': ')
        else:
            indent     = None
            separators = (',', ':')
        filetool.save(file, json.dumps(data, sort_keys=True, indent=indent, separators=separators))

        return
Example #16
        def printVariantInfo(variantSetNum, variants, variantSets,
                             variantData):
            if len(variantSets) < 2:  # only log when more than 1 set
                return
            variantStr = json.dumps(variants, ensure_ascii=False)
            self._console.head("Processing variant set %s/%s" %
                               (variantSetNum + 1, len(variantSets)))

            # Debug variant combination
            hasVariants = False
            for key in variants:
                if len(variantData[key]) > 1:
                    hasVariants = True

            if hasVariants:
                self._console.info("Switched variants:")
                self._console.indent()
                for key in variants:
                    if len(variantData[key]) > 1:
                        self._console.info("%s = %s" % (key, variants[key]))
                self._console.outdent()

            return
Example #17
    def depsToProviderFormat(classDepsIter, depsLogConf):
        ##
        # duplicates CodeProvider.passesOutputFilter
        def passesOutputFilter(resId):
            # must match some include expressions
            if not filter(None, [x.search(resId) for x in inclregexps]):  # [None, None, _sre.match, None, _sre.match, ...]
                return False
            # must not match any exclude expressions
            if filter(None, [x.search(resId) for x in exclregexps]):
                return False
            return True

        # ---------------------------------------

        inclregexps = jobconf.get("provider/include", ["*"])
        exclregexps = jobconf.get("provider/exclude", [])
        inclregexps = map(textutil.toRegExp, inclregexps)
        exclregexps = map(textutil.toRegExp, exclregexps)
        replace_dots = depsLogConf.get("json/replace-dots-in", [])
        slashes_keys = 'keys' in replace_dots
        slashes_vals = 'values' in replace_dots

        classToDeps = {}
        # Class deps
        for (packageId, classId, depId, loadOrRun) in classDepsIter:
            if passesOutputFilter(classId):
                if classId not in classToDeps:
                    classToDeps[classId] = {}
                    classToDeps[classId]["load"] = []
                    classToDeps[classId]["run"] = []
                if depId != None:
                    classToDeps[classId][loadOrRun].append(depId)

        if slashes_vals:
            # transform dep items
            for key, val in classToDeps.items():
                newval = []
                for ldep in val["load"]:
                    newdep = ldep.replace(".", "/")
                    newval.append(newdep)
                val["load"] = newval
                newval = []
                for ldep in val["run"]:
                    newdep = ldep.replace(".", "/")
                    newval.append(newdep)
                val["run"] = newval

        # Resource deps
        # class list
        classObjs = [x for x in script.classesObj if x.id in classToDeps.keys()]
        # map resources to class.resources
        classObjs = Class.mapResourcesToClasses(script.libraries, classObjs, jobconf.get("asset-let", {}))

        for clazz in classObjs:
            reskeys = ["/resource/resources#"+x.id for x in clazz.resources]
            classToDeps[clazz.id]["run"].extend(reskeys)

        # Message key deps
        for classId in classToDeps:
            #classKeys, _ = Locale.getTranslation(classId, {})
            classKeys, _ = script.classesAll[classId].messageStrings({})
            transIds  = set(x['id'] for x in classKeys) # get the msgid's, uniquely
            transIds.update(x['plural'] for x in classKeys if 'plural' in x) # add plural keys
            transKeys = ["/translation/i18n-${lang}#" + x for x in transIds]
            classToDeps[classId]["run"].extend(transKeys)

        # CLDR dependency
        for classId in classToDeps:
            if script.classesAll[classId].getHints("cldr"):
                classToDeps[classId]["run"].append("/locale/locale-${lang}#cldr")

        if slashes_keys:
            # transform dep keys ("qx.Class" -> "qx/Class.js")
            for key, val in classToDeps.items():
                if key.find(".")>-1:
                    newkey = key.replace(".", "/")
                    classToDeps[newkey] = classToDeps[key]
                    del classToDeps[key]

        # sort information for each class (for stable output)
        for classvals in classToDeps.values():
            for key in classvals:
                classvals[key] = sorted(classvals[key], reverse=True)

        # write to file
        file_ = depsLogConf.get('json/file', "deps.json")
        console.info("Writing dependency data to file: %s" % file_)
        pretty = depsLogConf.get('json/pretty', None)
        if pretty:
            indent     = 2
            separators = (', ', ': ')
        else:
            indent     = None
            separators = (',', ':')
        filetool.save(file_, json.dumps(classToDeps, sort_keys=True, indent=indent, separators=separators))

        return
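Besides class-to-class dependencies, the provider format appends URI-like run dependencies for resources, translation keys, and CLDR data. With json/replace-dots-in covering both keys and values, a finished entry looks roughly like this (ids invented):

classToDeps = {
    "qx/ui/Button": {
        "load": ["qx/core/Object"],
        "run": [
            "/translation/i18n-${lang}#key.button.label",
            "/resource/resources#qx/icon/button.png",
            "/locale/locale-${lang}#cldr",
        ],
    },
}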
Example #18
def json_response(code, message=''):
    return HttpResponse(json.dumps({
        "result": code,
        "message": message
    }),
                        mimetype='application/javascript')
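A hypothetical caller; note that mimetype= is the old Django keyword for what is now content_type=:

def ping(request):
    return json_response(0, "pong")
# response body: {"result": 0, "message": "pong"}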
Example #19
    def depsToProviderFormat(classDepsIter, depsLogConf):
        ##
        # duplicates CodeProvider.passesOutputFilter
        def passesOutputFilter(resId):
            # must match some include expressions
            if not filter(None, [
                    x.search(resId) for x in inclregexps
            ]):  # [None, None, _sre.match, None, _sre.match, ...]
                return False
            # must not match any exclude expressions
            if filter(None, [x.search(resId) for x in exclregexps]):
                return False
            return True

        # ---------------------------------------

        inclregexps = jobconf.get("provider/include", ["*"])
        exclregexps = jobconf.get("provider/exclude", [])
        inclregexps = map(textutil.toRegExp, inclregexps)
        exclregexps = map(textutil.toRegExp, exclregexps)
        replace_dots = depsLogConf.get("json/replace-dots-in", [])
        slashes_keys = 'keys' in replace_dots
        slashes_vals = 'values' in replace_dots

        classToDeps = {}
        # Class deps
        for (packageId, classId, depId, loadOrRun) in classDepsIter:
            if passesOutputFilter(classId):
                if classId not in classToDeps:
                    classToDeps[classId] = {}
                    classToDeps[classId]["load"] = []
                    classToDeps[classId]["run"] = []
                if depId != None:
                    classToDeps[classId][loadOrRun].append(depId)

        if slashes_vals:
            # transform dep items
            for key, val in classToDeps.items():
                newval = []
                for ldep in val["load"]:
                    newdep = ldep.replace(".", "/")
                    newval.append(newdep)
                val["load"] = newval
                newval = []
                for ldep in val["run"]:
                    newdep = ldep.replace(".", "/")
                    newval.append(newdep)
                val["run"] = newval

        # Resource deps
        # class list
        classObjs = [
            x for x in script.classesObj if x.id in classToDeps.keys()
        ]
        # map resources to class.resources
        classObjs = Class.mapResourcesToClasses(script.libraries, classObjs,
                                                jobconf.get("asset-let", {}))

        for clazz in classObjs:
            reskeys = ["/resource/resources#" + x.id for x in clazz.resources]
            classToDeps[clazz.id]["run"].extend(reskeys)

        # Message key deps
        for classId in classToDeps:
            #classKeys, _ = Locale.getTranslation(classId, {})
            classKeys, _ = script.classesAll[classId].messageStrings({})
            transIds = set(x['id']
                           for x in classKeys)  # get the msgid's, uniquely
            transIds.update(x['plural'] for x in classKeys
                            if 'plural' in x)  # add plural keys
            transKeys = ["/translation/i18n-${lang}#" + x for x in transIds]
            classToDeps[classId]["run"].extend(transKeys)

        # CLDR dependency
        for classId in classToDeps:
            if script.classesAll[classId].getHints("cldr"):
                classToDeps[classId]["run"].append(
                    "/locale/locale-${lang}#cldr")

        if slashes_keys:
            # transform dep keys ("qx.Class" -> "qx/Class.js")
            for key, val in classToDeps.items():
                if key.find(".") > -1:
                    newkey = key.replace(".", "/")
                    classToDeps[newkey] = classToDeps[key]
                    del classToDeps[key]

        # sort information for each class (for stable output)
        for classvals in classToDeps.values():
            for key in classvals:
                classvals[key] = sorted(classvals[key], reverse=True)

        # write to file
        file_ = depsLogConf.get('json/file', "deps.json")
        console.info("Writing dependency data to file: %s" % file_)
        pretty = depsLogConf.get('json/pretty', None)
        if pretty:
            indent = 2
            separators = (', ', ': ')
        else:
            indent = None
            separators = (',', ':')
        filetool.save(
            file_,
            json.dumps(classToDeps,
                       sort_keys=True,
                       indent=indent,
                       separators=separators))

        return
Example #20
    def docTreeToSearchIndex(tree, prefix = "", childPrefix = "  ", newline="\n"):
        types = []
        fullNames = []
        indexs = {}
        currClass = [0]

        def processNode(node,isLeaf):
            # filters
            if not node.hasAttributes():
                return 0  # continue traversal
            if node.type in ['state', 'param', 'see']:  # skip those currently
                return 0
            if "isCtor" in node.attributes and node.attributes["isCtor"]:
                return 0

            # construct a name string
            if 'fullName' in node.attributes:
                longestName = node.attributes['fullName']
            elif 'name' in node.attributes :
                longestName = node.attributes['name']
            else: # cannot handle unnamed entities
                return 0

            if longestName in fullNames:  # don't treat a node twice
                return 0

            # construct type string
            if node.type == "method":
                sfx = ""
                if 'access' in node.attributes:
                    acc = node.attributes['access']
                    if acc == "public":
                        sfx = "_pub"
                    elif acc == 'protected':
                        sfx = '_prot'
                    elif acc == 'private':
                        sfx = '_priv'
                    elif acc == 'internal':
                        sfx = '_intl'
                    else:
                        sfx = '_pub'  # there seem to be methods with weird access attribs
                else:
                    sfx = "_pub"  # force unqualified to public
                n_type = node.type + sfx
            elif node.type == "property":
                sfx = "_pub"
                n_type = node.type + sfx
            else:
                n_type = node.type

            # add type?
            if n_type not in types:
                types.append(n_type)
            tyx = types.index(n_type)

            if node.type in ['class','interface','package','mixin']:
                # add to fullNames - assuming uniqueness
                fullNames.append(longestName)
                fnx = fullNames.index(longestName)
                # commemorate current container
                currClass[0] = fnx
            else:  # must be a class feature
                longestName = '#' + longestName
                fnx = currClass[0]

            # maintain index
            if longestName in indexs:
                indexs[longestName].append([tyx, fnx])
            else:
                indexs[longestName]=[[tyx, fnx]]

            return 0

        tree.nodeTreeMap(processNode)

        index = { "__types__" : types,
                  "__fullNames__" : fullNames,
                  "__index__" : indexs }
        asString = json.dumps(index, separators=(',',':'), sort_keys=True) # compact encoding

        return asString
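The index trades readability for size: each name maps to [type-index, fullName-index] pairs into the two lookup lists, and class features are stored once under a #name key tied to the most recently seen container. A sketch with invented entries:

index = {
    "__types__": ["class", "method_pub"],
    "__fullNames__": ["qx.ui.Button"],
    "__index__": {
        "qx.ui.Button": [[0, 0]],   # [type index, fullName index]
        "#setLabel": [[1, 0]],      # feature of the current class (index 0)
    },
}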
Example #21
    def storeApi(self, include, apiPath, variantSet, verify):
        self._console.info("Generating API data...")
        self._console.indent()

        docTree = tree.Node("doctree")
        length = len(include)

        self._console.info("Loading class docs...", False)
        self._console.indent()

        packages = []
        hasErrors = False
        for pos, fileId in enumerate(include):
            self._console.progress(pos+1, length)
            fileApi = self.getApi(fileId, variantSet)
            if fileApi == None:
                hasErrors = True
            
            # Only continue merging if there were no errors
            if not hasErrors:
                self._mergeApiNodes(docTree, fileApi)
                pkgId = self._classesObj[fileId].package
                # make sure all parent packages are included
                nsparts = pkgId.split('.')
                for i in range(len(nsparts)+1):
                    parentPkg = ".".join(nsparts[0:i])
                    if not parentPkg in packages:
                        packages.append(parentPkg)

        self._console.outdent()

        if hasErrors:
            self._console.error("Found erroneous API information. Please see above. Stopping!")
            return
                
        self._console.info("Loading package docs...")
        self._console.indent()
        
        packages.sort()
        for pkgId in packages:
            self._mergeApiNodes(docTree, self.getPackageApi(pkgId))

        self._console.outdent()

        self._console.info("Connecting classes...")
        api.connectPackage(docTree, docTree)

        self._console.info("Generating search index...")
        index = self.docTreeToSearchIndex(docTree, "", "", "")
        
        if verify and "links" in verify:
            self.verifyLinks(docTree, index)
        
        self._console.info("Saving data...", False)
        self._console.indent()

        packageData = api.getPackageData(docTree)
        packageJson = json.dumps(packageData)
        filetool.save(os.path.join(apiPath, "apidata.json"), packageJson)
        
        length = 0
        for classData in api.classNodeIterator(docTree):
            length += 1
            
        pos = 0
        for classData in api.classNodeIterator(docTree):
            pos += 1
            self._console.progress(pos, length)
            nodeData = tree.getNodeData(classData)
            nodeJson = json.dumps(nodeData)
            fileName = os.path.join(apiPath, classData.get("fullName") + ".json")
            filetool.save(fileName, nodeJson)
            
        self._console.outdent()
            
        self._console.info("Saving index...")
        indexContent = json.dumps(index, separators=(',',':'), sort_keys=True) # compact encoding
        filetool.save(os.path.join(apiPath, "apiindex.json"), indexContent)            

        self._console.outdent()
        self._console.info("Done")
Example #22
    def storeApi(self, include, apiPath):
        self._console.info("Generating API data...")
        self._console.indent()

        docTree = tree.Node("doctree")
        length = len(include)

        self._console.info("Loading class docs...", False)
        self._console.indent()

        packages = []
        hasErrors = False
        for pos, fileId in enumerate(include):
            self._console.progress(pos + 1, length)
            fileApi = self.getApi(fileId)
            if fileApi == None:
                hasErrors = True

            # Only continue merging if there were no errors
            if not hasErrors:
                self._mergeApiNodes(docTree, fileApi)
                pkgId = self._classesObj[fileId].package
                # make sure all parent packages are included
                nsparts = pkgId.split('.')
                for i in range(len(nsparts) + 1):
                    parentPkg = ".".join(nsparts[0:i])
                    if not parentPkg in packages:
                        packages.append(parentPkg)

        self._console.outdent()

        if hasErrors:
            self._console.error(
                "Found erroneous API information. Please see above. Stopping!")
            return

        self._console.info("Loading package docs...")
        self._console.indent()

        packages.sort()
        for pkgId in packages:
            self._mergeApiNodes(docTree, self.getPackageApi(pkgId))

        self._console.outdent()

        self._console.info("Connecting classes...")
        api.connectPackage(docTree, docTree)

        self._console.info("Generating search index...")
        indexContent = self.docTreeToSearchIndex(docTree, "", "", "")

        self._console.info("Saving data...", False)
        self._console.indent()

        packageData = api.getPackageData(docTree)
        packageJson = json.dumps(packageData)
        filetool.save(os.path.join(apiPath, "apidata.json"), packageJson)

        length = 0
        for classData in api.classNodeIterator(docTree):
            length += 1

        pos = 0
        for classData in api.classNodeIterator(docTree):
            pos += 1
            self._console.progress(pos, length)
            nodeData = tree.getNodeData(classData)
            nodeJson = json.dumps(nodeData)
            fileName = os.path.join(apiPath,
                                    classData.get("fullName") + ".json")
            filetool.save(fileName, nodeJson)

        self._console.outdent()

        self._console.info("Saving index...")
        filetool.save(os.path.join(apiPath, "apiindex.json"), indexContent)

        self._console.outdent()
        self._console.info("Done")
Example #23
def debug(features):
    print
    print json.dumps(features)
    print
Example #24
    def __unicode__(self):
        if not self.is_json:
            return self.message
        else:
            return json.dumps(self.json_serializable())
Example #25
def CreateDemoJson(dest, qxdir):
    source = []
    build  = []
    scategories = {}
    bcategories = {}

    # Pre-processing
    JSON = {}
    # top-level includes

    default_json = os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])) , 'default.json')
    assert os.path.isfile(default_json)
    JSON['include'] = [{ "path" : "%s" % default_json }]

    # per-demo template file
    tmpl_json = os.path.join(os.path.dirname(sys.argv[0]) , 'tmpl.json')
    tmpl_manifest = os.path.join(os.path.dirname(sys.argv[0]) , TMPL_MANIFEST)
    tmpl_config = os.path.join(os.path.dirname(sys.argv[0]) , TMPL_CONFIG)

    json_tmpl = open(tmpl_json,"rU").read()

    demo_ns = "%s.demo" % namespace

    manifest_tmpl = json.loads(open(tmpl_manifest, 'rU').read())
    manifest_tmpl['provides']['namespace'] = demo_ns

    config_tmpl = json.loads(open(tmpl_config, 'rU').read())
    config_tmpl['let']['QOOXDOO_PATH'] = os.path.join('..', qxdir)
    config_tmpl['jobs']['source-demos']['let']['APPLICATION'] = demo_ns

    fn = os.path.basename(tmpl_manifest)[len('tmpl.'):] # file name
    open(os.path.join(dest, '..', '..', fn), 'w').write(json.dumps(manifest_tmpl, indent=2, sort_keys=True))

    fn = os.path.basename(tmpl_config)[len('tmpl.'):]
    open(os.path.join(dest, '..', '..', fn), 'w').write(json.dumps(config_tmpl, indent=2, sort_keys=True))

    # jobs section
    JSON['jobs'] = {}

    # allow exported jobs to be shadowed
    JSON['config-warnings'] = {}
    shadowed_jobs = []
    JSON['config-warnings']['job-shadowing'] = shadowed_jobs

    # Process demo html files
    while True:
        html = (yield)
        #print html
        if html == None:  # terminate the generator part and go to finalizing json file
            break

        category, name = demoCategoryFromFile(html)
        #print ">>> Processing: %s.%s..." % (category, name)

        # check for demo-specific config file
        config_file = os.path.splitext(html)[0] + ".json"
        if os.path.exists(config_file):
            JSON['include'].append({"path":"%s" % config_file})
            demo_config = json.loadStripComments(config_file)
            shadowed_jobs.extend(demo_config['export'])

        # build classname
        simple = "%s.%s" % (category,name)
        source.append("source-%s" % simple)
        build.append("build-%s" % simple)
        if not category in scategories:
            scategories[category] = []
        scategories[category].append("source-%s" % (simple,))
        if not category in bcategories:
            bcategories[category] = []
        bcategories[category].append("build-%s" % (simple,))

        # concat all
        currcont = json_tmpl.replace('XXX',"%s.%s"%(category,name)).replace("YYY",name).replace("ZZZ",category)
        templatejobs = json.loads("{" + currcont + "}")
        for job,jobval in templatejobs.iteritems():
            JSON['jobs'][job] = jobval

    # Post-processing
    for category in scategories:
        currentry = JSON['jobs']["source-%s" % category] = {}
        currentry['run'] = sorted(scategories[category])
        currentry = JSON['jobs']["build-%s" % category] = {}
        currentry['run'] = sorted(bcategories[category])

    JSON['jobs']["source"] = { "run" : sorted(source) }
    JSON['jobs']["build"]  = { "run" : sorted(build) }

    cont = '// This file is dynamically created by the generator!\n'
    cont += json.dumps(JSON, sort_keys=True, indent=2)
    filetool.save(os.path.join('demobrowser', fJSON), cont)

    yield  # final yield to provide for .send(None) of caller
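CreateDemoJson is a coroutine: the caller primes it, feeds it demo HTML paths via send(), and finally sends None to enter the file-writing phase (the trailing bare yield keeps that last send from raising StopIteration). A hypothetical driver (paths invented):

gen = CreateDemoJson(dest, qxdir)   # dest/qxdir as in the signature above
gen.next()                          # prime the generator (Python 2)
for html in ("animation/Login.html", "table/Filter.html"):
    gen.send(html)
gen.send(None)                      # leave the loop and write the json file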
Example #26
def CreateDemoJson():

    source = []
    build = []
    scategories = {}
    bcategories = {}

    # Pre-processing
    JSON = {}
    # top-level includes
    default_json = 'tool' + '/' + 'default.json'
    assert os.path.isfile(default_json)
    JSON['include'] = [{"path": "%s" % default_json}]

    # per-demo template file
    json_tmpl = open(os.path.join('tool', 'tmpl.json'), "rU").read()

    # jobs section
    JSON['jobs'] = {}

    # Process demo html files
    while True:
        html = (yield)
        #print html
        if html == None:  # terminate the generator part and go to finalizing json file
            break

        category, name = demoCategoryFromFile(html)
        #print ">>> Processing: %s.%s..." % (category, name)

        # check for demo-specific config file
        config_file = os.path.splitext(html)[0] + ".json"
        if os.path.exists(config_file):
            JSON['include'].append({"path": "%s" % config_file})

        # build classname
        simple = "%s.%s" % (category, name)
        source.append("source-%s" % simple)
        build.append("build-%s" % simple)
        if not category in scategories:
            scategories[category] = []
        scategories[category].append("source-%s" % (simple, ))
        if not category in bcategories:
            bcategories[category] = []
        bcategories[category].append("build-%s" % (simple, ))

        # concat all
        currcont = json_tmpl.replace('XXX',
                                     "%s.%s" % (category, name)).replace(
                                         "YYY", name).replace("ZZZ", category)
        templatejobs = json.loads("{" + currcont + "}")
        for job, jobval in templatejobs.iteritems():
            JSON['jobs'][job] = jobval

    # Post-processing
    for category in scategories:
        currentry = JSON['jobs']["source-%s" % category] = {}
        currentry['run'] = sorted(scategories[category])
        currentry = JSON['jobs']["build-%s" % category] = {}
        currentry['run'] = sorted(bcategories[category])

    JSON['jobs']["source"] = {"run": sorted(source)}
    JSON['jobs']["build"] = {"run": sorted(build)}

    cont = '// This file is dynamically created by the generator!\n'
    cont += json.dumps(JSON, sort_keys=True, indent=2)
    filetool.save(fJSON, cont)

    yield  # final yield to provide for .send(None) of caller
Example #27
def send_message(dst, **ctx):
    _send_message_raw(dst, json.dumps(ctx))
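If this helper pairs with the _send_message_raw of Example #8, ctx is JSON-encoded twice: once here and once inside _send_message_raw, so a consumer has to decode twice. A sketch:

import json

ctx = {"user": 1}
payload = json.dumps(json.dumps(ctx))    # what basic_publish would carry
print(json.loads(json.loads(payload)))   # two loads to get the dict back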
Example #28
def CreateDemoJson():

    source = []
    build  = []
    scategories = {}
    bcategories = {}

    # Pre-processing
    JSON = {}
    # top-level includes
    default_json = 'tool' + '/' + 'default.json'
    assert os.path.isfile(default_json)
    JSON['include'] = [{ "path" : "%s" % default_json }]

    # per-demo template file
    json_tmpl = open(os.path.join('tool','tmpl.json'),"rU").read()

    # jobs section
    JSON['jobs'] = {}

    # Process demo html files
    while True:
        html = (yield)
        #print html
        if html == None:  # terminate the generator part and go to finalizing json file
            break

        category, name = demoCategoryFromFile(html)
        #print ">>> Processing: %s.%s..." % (category, name)

        # check for demo-specific config file
        config_file = os.path.splitext(html)[0] + ".json"
        if os.path.exists(config_file):
            JSON['include'].append({"path":"%s" % config_file})

        # build classname
        simple = "%s.%s" % (category,name)
        source.append("source-%s" % simple)
        build.append("build-%s" % simple)
        if not category in scategories:
            scategories[category] = []
        scategories[category].append("source-%s" % (simple,))
        if not category in bcategories:
            bcategories[category] = []
        bcategories[category].append("build-%s" % (simple,))

        # concat all
        currcont = json_tmpl.replace('XXX',"%s.%s"%(category,name)).replace("YYY",name).replace("ZZZ",category)
        templatejobs = json.loads("{" + currcont + "}")
        for job,jobval in templatejobs.iteritems():
            JSON['jobs'][job] = jobval

    # Post-processing
    for category in scategories:
        currentry = JSON['jobs']["source-%s" % category] = {}
        currentry['run'] = sorted(scategories[category])
        currentry = JSON['jobs']["build-%s" % category] = {}
        currentry['run'] = sorted(bcategories[category])

    JSON['jobs']["source"] = { "run" : sorted(source) }
    JSON['jobs']["build"]  = { "run" : sorted(build) }

    cont = '// This file is dynamically created by the generator!\n'
    cont += json.dumps(JSON, sort_keys=True, indent=2)
    filetool.save(fJSON, cont)

    yield  # final yield to provide for .send(None) of caller
Example #29
    def docTreeToSearchIndex(tree, prefix="", childPrefix="  ", newline="\n"):
        types = []
        fullNames = []
        indexs = {}
        currClass = [0]

        def processNode(node, isLeaf):
            # filters
            if not node.hasAttributes():
                return 0  # continue traversal
            if node.type in ['state', 'param', 'see']:  # skip those currently
                return 0

            # construct a name string
            if 'fullName' in node.attributes:
                longestName = node.attributes['fullName']
            elif 'name' in node.attributes:
                longestName = node.attributes['name']
            else:  # cannot handle unnamed entities
                return 0

            if longestName in fullNames:  # don't treat a node twice
                return 0

            # construct type string
            if node.type == "method":
                sfx = ""
                if 'access' in node.attributes:
                    acc = node.attributes['access']
                    if acc == "public":
                        sfx = "_pub"
                    elif acc == 'protected':
                        sfx = '_prot'
                    elif acc == 'private':
                        sfx = '_priv'
                    else:
                        sfx = '_pub'  # there seem to be methods with weird access attribs
                else:
                    sfx = "_pub"  # force unqualified to public
                n_type = node.type + sfx
            elif node.type == "property":
                sfx = "_pub"
                n_type = node.type + sfx
            else:
                n_type = node.type

            # add type?
            if n_type not in types:
                types.append(n_type)
            tyx = types.index(n_type)

            if node.type in ['class', 'interface', 'package', 'mixin']:
                # add to fullNames - assuming uniqueness
                fullNames.append(longestName)
                fnx = fullNames.index(longestName)
                # commemorate current container
                currClass[0] = fnx
            else:  # must be a class feature
                longestName = '#' + longestName
                fnx = currClass[0]

            # maintain index
            if longestName in indexs:
                indexs[longestName].append([tyx, fnx])
            else:
                indexs[longestName] = [[tyx, fnx]]

            return 0

        tree.nodeTreeMap(processNode)

        index = {
            "__types__": types,
            "__fullNames__": fullNames,
            "__index__": indexs
        }
        asString = json.dumps(index, separators=(',', ':'),
                              sort_keys=True)  # compact encoding

        return asString
Example #30
    def storeApi(self, include, apiPath, variantSet, jobConf):
        self._console.info("Generating API data...")
        self._console.indent()

        docTree = tree.Node("doctree")
        docTree.set("fullName", "")
        docTree.set("name", "")
        docTree.set("packageName", "")
        length = len(include)

        self._console.info("Loading class docs...", False)
        self._console.indent()

        packages = []
        AttachMap = {}
        hasErrors = False
        for pos, fileId in enumerate(include):
            self._console.progress(pos+1, length)
            fileApi, attachMap = self.getApi(fileId, variantSet)
            if fileApi == None:
                hasErrors = True

            # Only continue merging if there were no errors
            if not hasErrors:
                # update AttachMap
                for cls in attachMap: # 'qx.Class', 'qx.core.Object', 'q', ...
                    if cls not in AttachMap:
                        AttachMap[cls] = attachMap[cls]
                    else:
                        for section in attachMap[cls]:  # 'statics', 'members'
                            if section not in AttachMap[cls]:
                                AttachMap[cls][section] = attachMap[cls][section]
                            else:
                                for method in attachMap[cls][section]:  # 'define', 'showToolTip', ...
                                    if method not in AttachMap[cls][section]:
                                        AttachMap[cls][section][method] = attachMap[cls][section][method]
                                    else:
                                        self._console.warn("Multiple @attach for same target '%s::%s#%s'." % (cls, section, method))

                self._mergeApiNodes(docTree, fileApi)
                pkgId = self._classesObj[fileId].package
                # make sure all parent packages are included
                nsparts = pkgId.split('.')
                for i in range(len(nsparts)+1):
                    parentPkg = ".".join(nsparts[0:i])
                    if not parentPkg in packages:
                        packages.append(parentPkg)

        self._console.outdent()

        if hasErrors:
            self._console.error("Found erroneous API information. Please see above. Stopping!")
            return

        self._console.info("Loading package docs...")
        self._console.indent()

        packages.sort()
        for pkgId in packages:
            self._mergeApiNodes(docTree, self.getPackageApi(pkgId))

        self._console.outdent()

        self._console.info("Connecting classes...  ", feed=False)
        api.connectPackage(docTree, docTree)
        self._console.dotclear()

        self._console.info("Generating search index...")
        index = self.docTreeToSearchIndex(docTree, "", "", "")

        if "verify" in jobConf:
            if "links" in jobConf["verify"]:
                api.verifyLinks(docTree, index)
            if "types" in jobConf["verify"]:
                api.verifyTypes(docTree, index)

        if "warnings" in jobConf and "output" in jobConf["warnings"]:
            api.logErrors(docTree, jobConf["warnings"]["output"])

        if "verify" in jobConf:
            if "statistics" in jobConf["verify"]:
                api.verifyDocPercentage(docTree)

        self._console.info("Saving data...", False)
        self._console.indent()

        packageData = api.getPackageData(docTree)
        packageJson = json.dumps(packageData)
        filetool.save(os.path.join(apiPath, "apidata.json"), packageJson)

        # apply the @attach information
        for classData in api.classNodeIterator(docTree):
            className = classData.get("fullName")
            if className in AttachMap:
                self._applyAttachInfo(className, classData, AttachMap[className])

        # write per-class .json to disk
        length = 0
        for classData in api.classNodeIterator(docTree):
            length += 1

        links = []

        pos = 0
        for classData in api.classNodeIterator(docTree):
            pos += 1
            self._console.progress(pos, length)
            nodeData = tree.getNodeData(classData)
            nodeJson = json.dumps(nodeData)
            className = classData.get("fullName")
            fileName = os.path.join(apiPath, className + ".json")
            filetool.save(fileName, nodeJson)

            sitemap = False
            if "sitemap" in jobConf:
                sitemap = jobConf["sitemap"]
                if "link-uri" in sitemap:
                    links.append(sitemap["link-uri"] % className)

            #import pdb; pdb.set_trace()
            #for type in ["method", "method-static", "event", "property", "constant"]:
            #  for item in classData.getAllChildrenOfType(type):
            #      itemName = className + "~" + item.attributes["name"]
            #      link = linkPrefix + itemName

        self._console.outdent()

        # write apiindex.json
        self._console.info("Saving index...")
        indexContent = json.dumps(index, separators=(',', ':'), sort_keys=True)  # compact encoding
        filetool.save(os.path.join(apiPath, "apiindex.json"), indexContent)

        # save sitemap
        if sitemap and len(links) > 0:
            self._console.info("Saving XML sitemap...")
            sitemapData = self.getSitemap(links)
            if "file" in sitemap:
                sitemapFile = sitemap["file"]
            else:
                sitemapFile = os.path.join(apiPath, "sitemap.xml")
            filetool.save(sitemapFile, sitemapData)

        self._console.outdent()
        self._console.info("Done")
Example #31
    def storeApi(self, include, apiPath, variantSet, verify):
        self._console.info("Generating API data...")
        self._console.indent()

        docTree = tree.Node("doctree")
        docTree.set("fullName", "")
        docTree.set("name", "")
        docTree.set("packageName", "")
        length = len(include)

        self._console.info("Loading class docs...", False)
        self._console.indent()

        packages = []
        AttachMap = {}
        hasErrors = False
        for pos, fileId in enumerate(include):
            self._console.progress(pos+1, length)
            fileApi, attachMap = self.getApi(fileId, variantSet)
            if fileApi == None:
                hasErrors = True
            
            # Only continue merging if there were no errors
            if not hasErrors:
                # update AttachMap
                for cls in attachMap: # 'qx.Class', 'qx.core.Object', 'q', ...
                    if cls not in AttachMap:
                        AttachMap[cls] = attachMap[cls]
                    else:
                        for section in attachMap[cls]:  # 'statics', 'members'
                            if section not in AttachMap[cls]:
                                AttachMap[cls][section] = attachMap[cls][section]
                            else:
                                for method in attachMap[cls][section]:  # 'define', 'showToolTip', ...
                                    if method not in AttachMap[cls][section]:
                                        AttachMap[cls][section][method] = attachMap[cls][section][method]
                                    else:
                                        self._console.warn("Multiple @attach for same target '%s::%s#%s'." % (cls, section, method))

                self._mergeApiNodes(docTree, fileApi)
                pkgId = self._classesObj[fileId].package
                # make sure all parent packages are included
                nsparts = pkgId.split('.')
                for i in range(len(nsparts)+1):
                    parentPkg = ".".join(nsparts[0:i])
                    if not parentPkg in packages:
                        packages.append(parentPkg)

        self._console.outdent()

        if hasErrors:
            self._console.error("Found erroneous API information. Please see above. Stopping!")
            return
                
        self._console.info("Loading package docs...")
        self._console.indent()
        
        packages.sort()
        for pkgId in packages:
            self._mergeApiNodes(docTree, self.getPackageApi(pkgId))

        self._console.outdent()

        self._console.info("Connecting classes...")
        api.connectPackage(docTree, docTree)

        self._console.info("Generating search index...")
        index = self.docTreeToSearchIndex(docTree, "", "", "")
        
        if verify and "links" in verify:
            self.verifyLinks(docTree, index)
        
        self._console.info("Saving data...", False)
        self._console.indent()

        packageData = api.getPackageData(docTree)
        packageJson = json.dumps(packageData)
        filetool.save(os.path.join(apiPath, "apidata.json"), packageJson)

        # apply the @attach information
        for classData in api.classNodeIterator(docTree):
            className = classData.get("fullName")
            if className in AttachMap:
                self._applyAttachInfo(className, classData, AttachMap[className])
        
        # write per-class .json to disk
        length = sum(1 for _ in api.classNodeIterator(docTree))
            
        pos = 0
        for classData in api.classNodeIterator(docTree):
            pos += 1
            self._console.progress(pos, length)
            nodeData = tree.getNodeData(classData)
            nodeJson = json.dumps(nodeData)
            fileName = os.path.join(apiPath, classData.get("fullName") + ".json")
            filetool.save(fileName, nodeJson)
            
        self._console.outdent()
            
        # write apiindex.json
        self._console.info("Saving index...")
        indexContent = json.dumps(index, separators=(',',':'), sort_keys=True) # compact encoding
        filetool.save(os.path.join(apiPath, "apiindex.json"), indexContent)            

        self._console.outdent()
        self._console.info("Done")
Example #32
0
def runImageCombining(jobconf, confObj):

    def extractFromPrefixSpec(prefixSpec):
        prefix = altprefix = ""
        if not prefixSpec or not isinstance(prefixSpec, types.ListType):
            if jobconf.get("config-warnings/combine-images", True):
                console.warn("Missing or incorrect prefix spec, might lead to incorrect resource id's.")
        elif len(prefixSpec) == 2 :  # prefixSpec = [ prefix, altprefix ]
            prefix, altprefix = prefixSpec
        elif len(prefixSpec) == 1:
            prefix            = prefixSpec[0]
            altprefix         = ""
        return prefix, altprefix

    ##
    # strip prefix - if present - from imagePath, and replace it with altprefix
    def getImageId(imagePath, prefixSpec):
        prefix, altprefix = extractFromPrefixSpec(prefixSpec)
        imageId = imagePath # init
        _, imageId, _ = Path.getCommonPrefix(imagePath, prefix) # assume: imagePath = prefix "/" imageId
        if altprefix:
            imageId   = altprefix + "/" + imageId

        imageId = Path.posifyPath(imageId)
        return imageId

    ##
    # create a dict with the clipped image file path as key, and prefix elements as value
    def getClippedImagesDict(imageSpec):
        imgDict = {}
        inputStruct = imageSpec['input']
        for group in inputStruct:
            prefixSpec = group.get('prefix', [])
            prefix, altprefix = extractFromPrefixSpec(prefixSpec)
            if prefix:
                prefix = confObj.absPath(prefix)
            for filepatt in group['files']:
                num_files = 0
                for file in glob.glob(confObj.absPath(filepatt)):  # resolve file globs - TODO: can be removed in generator.action.ImageClipping
                    console.debug("adding image %s" % file)
                    imgDict[file]    = [prefix, altprefix]
                    num_files       += 1
                if num_files == 0:
                    raise ValueError("Non-existing file spec: %s" % filepatt)

        return imgDict

    # ----------------------------------------------------------------------

    if not jobconf.get("combine-images", False):
        return
    
    console = Context.console
    cache = Context.cache

    console.info("Combining images...")
    console.indent()
    imageClipper = ImageClipping(console, cache, jobconf)

    images = jobconf.get("combine-images/images", {})
    for image, imgspec in images.iteritems():
        console.info("Creating image %s" % image)
        console.indent()
        imageId = getImageId(image, imgspec.get('prefix', []))
        image  = confObj.absPath(image)  # abs output path
        config = {}

        # create a dict of clipped image objects - for later look-up
        clippedImages = getClippedImagesDict(imgspec)

        # collect list of all input files, no matter where they come from
        input = sorted(clippedImages.keys())

        # collect layout property (True = horizontal, False = vertical; default horizontal)
        layout = imgspec.get('layout', "horizontal") == "horizontal"

        # get type of combined image (png, base64, ...)
        combtype = "base64" if image.endswith(".b64.json") else "extension"

        # create the combined image
        subconfigs = imageClipper.combine(image, input, layout, combtype)

        # for the meta information, go through the list of returned subconfigs (one per clipped image)
        for sub in subconfigs:
            x = Image()
            x.combId, x.left, x.top, x.width, x.height, x.format = (
               imageId, sub['left'], sub['top'], sub['width'], sub['height'], sub['type'])
            subId = getImageId(sub['file'], clippedImages[sub['file']])
            config[subId] = x.toMeta()

        # store meta data for this combined image
        bname = os.path.basename(image)
        ri = bname.rfind('.')
        if ri > -1:
            bname = bname[:ri]
        bname += '.meta'
        meta_fname = os.path.join(os.path.dirname(image), bname)
        console.debug("writing meta file %s" % meta_fname)
        filetool.save(meta_fname, json.dumps(config, ensure_ascii=False, sort_keys=True))
        console.outdent()

        # handle base64 type, need to write "combined image" to file
        if combtype == "base64":
            combinedMap = {}
            for sub in subconfigs:
                subMap = {}
                subId  = getImageId(sub['file'], clippedImages[sub['file']])
                subMap['width']    = sub['width']
                subMap['height']   = sub['height']
                subMap['type']     = sub['type']
                subMap['encoding'] = sub['encoding']
                subMap['data']     = sub['data']
                combinedMap[subId] = subMap
            filetool.save(image, json.dumpsCode(combinedMap))

    console.outdent()

    return
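getImageId() above rewrites an image path by stripping a configured prefix and splicing in the alternative prefix. A simplified, dependency-free sketch of that mapping; posixpath stands in for the tool's Path.getCommonPrefix/posifyPath helpers, which are assumed to behave similarly for the common case imagePath = prefix "/" imageId:

    import posixpath

    def image_id(image_path, prefix, altprefix=""):
        # Strip "<prefix>/" from the front, then prepend the altprefix.
        rel = posixpath.relpath(image_path, prefix)
        return posixpath.join(altprefix, rel) if altprefix else rel

    # e.g. image_id("source/resource/icons/combined.png", "source/resource", "myapp")
    # -> "myapp/icons/combined.png"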
Example #34
0
def debug(features):
    # Dump the given feature map to stdout as a single JSON line,
    # framed by blank lines.
    print
    print json.dumps(features)
    print
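A minimal usage sketch; the feature map shown is hypothetical, debug() accepts any JSON-serializable object:

    debug({"qx.debug": True, "qx.aspects": False})
    # prints the map as one JSON line, with a blank line before and after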