コード例 #1
0
    def testMemorySafety(self):
        """Check that child wrapper objects keep their owners alive.

        Meant to be run under ASAN: each scenario deletes the owning
        object first and then uses the derived object, which must not
        be a use-after-free.
        """
        # This is meant to be run with ASAN and checks that
        # certain use-after-frees are not present.
        cgi = neo_cgi.CGI()
        hdf = cgi.hdf
        del cgi  # the HDF must survive the CGI it came from
        hdf.getValue("x", "y")
        del hdf

        cgi = neo_cgi.CGI()
        cs = cgi.cs()
        del cgi  # the CS must survive the CGI it came from
        cs.parseStr("x")
        del cs

        hdf = neo_util.HDF()
        hdf.setValue("y.z", "1")
        child = hdf.getChild("y")
        del hdf  # the child node must survive the root HDF
        child.getValue("x", "y")
        del child

        hdf = neo_util.HDF()
        cs = neo_cs.CS(hdf)
        del hdf  # the CS must survive the HDF it renders
        cs.parseStr("x")
        del cs
コード例 #2
0
ファイル: CS.py プロジェクト: rosin-project/roswiki
def execute(macro, args):
    """MoinMoin macro: render a ClearSilver template stored in a wiki page.

    *args* is "template_page,hdf_text": the page holding the CS template
    and the HDF text used as the dataset.  Returns the rendered,
    wiki-parsed body, or '' when the page is missing or unreadable.
    """
    request = macro.request
    page_name = macro.formatter.page.page_name

    # get args; default BOTH parts so a missing args does not leave
    # hdf_text undefined (the original raised NameError at readString()
    # when args was None)
    include_page_name = ''
    hdf_text = ''
    if args is not None:
        (include_page_name, _, hdf_text) = args.partition(',')

    include_page_name = wikiutil.AbsPageName(page_name, include_page_name)

    include_page = Page(request, include_page_name)

    if include_page is None:
        return ''
    if not request.user.may.read(include_page_name):
        return ''

    cstemplate = include_page.getPageText()

    hdf = neo_util.HDF()
    hdf.readString(hdf_text)

    # NOTE(review): the key has a trailing space ("WhiteSpaceStrip ") --
    # other call sites use the key without it; confirm which is intended.
    hdf.setValue("Config.WhiteSpaceStrip ", "0")

    cs = neo_cs.CS(hdf)
    cs.parseStr(cstemplate)

    body = cs.render()

    body = wikiutil.renderText(request, WikiParser, body)
    return body
コード例 #3
0
ファイル: clearsilver.py プロジェクト: omunroe-com/tracdebdev
    def __init__(self, loadpaths=[]):
        """Create a new HDF dataset.
        
        The loadpaths parameter can be used to specify a sequence of paths under
        which ClearSilver will search for template files:

        >>> hdf = HDFWrapper(loadpaths=['/etc/templates',
        ...                             '/home/john/templates'])
        >>> print hdf
        hdf {
          loadpaths {
            0 = /etc/templates
            1 = /home/john/templates
          }
        }
        """
        try:
            import neo_cgi
            # The following line is needed so that ClearSilver can be loaded when
            # we are being run in multiple interpreters under mod_python
            neo_cgi.update()
            import neo_util
            self.hdf = neo_util.HDF()
        except ImportError, e:
            raise TracError, "ClearSilver not installed (%s)" % e
コード例 #4
0
def _render(f, template_file, environ, start_response, *args, **kwds):
    """WSGI wrapper: call *f* and render its result dict through the
    ClearSilver template named by *template_file*.

    DataNotFound answers 404; HTTP303 answers a 302 redirect; the
    normal path answers 200 with the rendered template body.
    """

    # call our original function with original args
    try:
        results = f(environ, start_response)

        # NOTE(review): split(".") yields (name, extension), so ext[1]
        # below is the SECOND CHARACTER of the extension string --
        # confirm that is really what `extensions` is keyed by.
        template_name, ext = template_file.split(".")
        contenttype = "text/html"
        if len(ext) > 1 and (ext[1] in extensions):
            contenttype = extensions[ext[1]]

        # copy the handler's results into an HDF dataset
        hdf = neo_util.HDF()
        _set(hdf, '', results)
        hdf.setValue('style', stylesheet_uri)

        # shove the results into the template
        clearsilver = neo_cs.CS(hdf)
        clearsilver.parseFile(os.path.join('templates', template_name + '.cs'))

        # XXX where is our error handling?
        start_response("200 OK", [('Content-Type', contenttype)])
        return [clearsilver.render()]
    except DataNotFound:
        start_response("404 Not Found", [('Content-Type', 'text/plain')])
        return ['404 Error, Content not found']
    except HTTP303, e:
        # redirect target; relative URLs are made absolute against the request
        url = str(e.value)
        if not url.startswith(('http', '/')):
            url = request_uri(environ) + url
        start_response("302 Found", [('Location', url)])
        return ['Redirect to url']
コード例 #5
0
    def GenreateCode(self):
        items = urllib2.unquote(request.params['hdf']).split(';')
        # .split(';')
        hdf = neo_util.HDF()
        print items
        for item in items:
            print item
            v = item.split('=')
            if (len(v) == 2):
                hdf.setValue(v[0], v[1])

        cs = neo_cs.CS(hdf)
        cs.parseFile("./feedtpladmin/public/feed-java-code.cs"
                     )  # parse a file from disk
        gen_code = cs.render()

        url_path = '/FeedBuilder_' + request.params[
            'stype'] + '_' + request.params['version'] + '.java'

        f = open("./feedtpladmin/public" + url_path, "w")

        f.write(gen_code)
        f.close()

        return url_path
コード例 #6
0
def dump_bug(level, etype, msg, location=None, nhdf=None):
    """Write a bug-report HDF file into DUMP_DIR, unless dumping is
    disabled.

    The report is written under DUMP_DIR/tmp first and then renamed
    into DUMP_DIR/new, so consumers never see a half-written file.
    NOTE(review): the nhdf parameter is accepted but never used here.
    """
    global DISABLE_DUMP
    if DISABLE_DUMP: return

    now = int(time.time())
    pid = os.getpid()

    import neo_cgi, neo_util
    hdf = neo_util.HDF()
    hdf.setValue("Required.Level", level)
    hdf.setValue("Required.When", str(int(time.time())))
    hdf.setValue("Required.Type", etype)
    hdf.setValue("Required.Title", msg)
    hdf.setValue("Optional.Hostname", socket.gethostname())
    if location:
        hdf.setValue("Optional.Location", location)

    # snapshot the whole process environment into the report
    for (key, value) in os.environ.items():
        hdf.setValue("Environ.%s" % key, value)

    # per-process counter keeps filenames unique within one second
    global Count
    Count = Count + 1
    fname = "%d.%d_%d.%s" % (now, pid, Count, socket.gethostname())
    checkPaths()

    # write to tmp/, then move into new/ once complete
    tpath = os.path.join(DUMP_DIR, "tmp", fname)
    npath = os.path.join(DUMP_DIR, "new", fname)
    hdf.writeFile(tpath)
    os.rename(tpath, npath)
コード例 #7
0
ファイル: clearsilver2.py プロジェクト: kuj1pal/roswiki
    def format(self, formatter):
        """Render the stored raw section text (self.raw) as an HDF
        dataset expanded through a ClearSilver template page.

        The template page name comes from the parser's format_args;
        the rendered output is post-processed by the wiki parser and
        written to the request as raw HTML.
        """
        # Resolve the template page relative to the current page.
        current_page = formatter.page.page_name
        template_page = wikiutil.AbsPageName(current_page,
                                             self.kw["format_args"])
        template_text = Page(self.request, template_page).getPageText()

        # Load the section body as the dataset; keep whitespace intact.
        dataset = neo_util.HDF()
        dataset.readString(self.raw)
        dataset.setValue("Config.WhiteSpaceStrip", "0")

        # Expand the template against the dataset.
        renderer = neo_cs.CS(dataset)
        renderer.parseStr(template_text)
        html = renderer.render()

        # Let MoinMoin's wiki parser post-process, then emit raw HTML.
        html = wikiutil.renderText(self.request, WikiParser, html)
        self.request.write(formatter.rawHTML(html))
コード例 #8
0
ファイル: cgistarter.py プロジェクト: promikeda/web_interface
    def error(self, ecode, reason=None):
        """Emit an HTML error page for HTTP status *ecode* on the CGI
        output stream, then log it via warn()."""
        import httpResponses
        status_text = httpResponses.gHTTPResponses[ecode]

        # 404 gets a dedicated template; everything else shares the default.
        error_template = (httpResponses.errorMessage_404 if ecode == 404
                          else httpResponses.errorMessage_Default)

        dataset = neo_util.HDF()
        dataset.setValue("code", str(ecode))
        if status_text:
            dataset.setValue("message", status_text)
        if reason:
            dataset.setValue("reason", reason)
        for env_key, env_val in self.context.environ.items():
            dataset.setValue("environ." + env_key, str(env_val))

        # headers first, then a blank line, then the rendered body
        out = self.context.stdout
        out.write("Content-Type: text/html\r\n")
        self.context.setStatus(None, ecode)
        out.write("Status: %s\r\n" % ecode)
        out.write("\r\n")

        renderer = neo_cs.CS(dataset)
        renderer.parseStr(error_template)
        out.write(renderer.render())

        warn("Error", status_text, reason)
コード例 #9
0
ファイル: trans.py プロジェクト: sschoen/clearsilver
    def loadMap(self, file, prefix, lang):
        """Load translated strings for *lang* from the HDF file *file*
        into the maps table.

        Walks the children of *prefix*: each child's name is a
        string_id and its value the translated text.  Existing rows are
        updated only when the text changed; missing rows are created.
        (The *file* parameter shadows the builtin but is kept for API
        compatibility.)
        """
        log("Loading map for language %s" % lang)
        hdf = neo_util.HDF()
        hdf.readFile(file)
        obj = hdf.getChild(prefix)
        updates = 0
        new_r = 0
        while obj is not None:
            s_id = obj.name()
            # renamed from 'str', which shadowed the builtin
            text = obj.value()

            try:
                map_r = self.tdb.maps.fetchRow([('string_id', s_id),
                                                ('lang', lang)])
            except odb.eNoMatchingRows:
                # no mapping yet for this string/lang: create one
                map_r = self.tdb.maps.newRow()
                map_r.string_id = s_id
                map_r.lang = lang
                new_r = new_r + 1

            if map_r.string != text:
                updates = updates + 1
                map_r.string = text
                map_r.save()

            obj = obj.next()
        log("New maps: %d  Updates: %d" % (new_r, updates - new_r))
コード例 #10
0
 def testCsRenderStrip(self):
     """ClearSilver.WhiteSpaceStrip=1 collapses whitespace runs in the
     rendered output of an already-parsed template."""
     hdf = neo_util.HDF()
     cs = neo_cs.CS(hdf)
     hdf.setValue("Foo.Bar", "1")
     cs.parseStr("This is my         file   <?cs var:Foo.Bar ?>   ")
     assert cs.render() == 'This is my         file   1   '
     # same parsed template, re-rendered with stripping enabled
     hdf.setValue("ClearSilver.WhiteSpaceStrip", "1")
     assert cs.render() == 'This is my file 1 '
コード例 #11
0
 def ValidateTemplate(self):
     """Parse and render the 'tpl' request parameter to verify it is a
     valid ClearSilver template; returns an error message on failure,
     None on success.

     NOTE(review): the except branch returns a tuple ("错误:", str(e)),
     not one concatenated string -- confirm the web framework expects
     that.
     """
     tpl = request.params['tpl']
     hdf = neo_util.HDF()  # create an HDF dataset
     cs = neo_cs.CS(hdf)
     try:
         cs.parseStr(urllib2.unquote(str(tpl)))
         cs.render()
     except neo_util.ParseError, e:
         return "错误:", str(e)
コード例 #12
0
ファイル: Notify.py プロジェクト: omunroe-com/tracdebdev
 def __init__(self, env, msg_template):
     """Prepare a notification: populate the project HDF dataset and
     pre-parse the message template found in the configured
     templates_dir."""
     self.env = env
     self.db = env.get_db_cnx()
     self.hdf = neo_util.HDF()
     # fill the dataset with the standard project values
     core.populate_hdf(self.hdf, env, self.db, None)
     tmpl = os.path.join(env.get_config('general', 'templates_dir'),
                         msg_template)
     self.cs = neo_cs.CS(self.hdf)
     self.cs.parseFile(tmpl)
コード例 #13
0
 def render():
     """Build a demo HDF dataset (title, user, items.0..13) and expand
     template.cs against it, returning the rendered text."""
     dataset = neo_util.HDF()
     dataset.setValue('hdf.loadpaths.0', dirname)
     dataset.setValue('title', escape('Just a test'))
     dataset.setValue('user', escape('joe'))
     # items.0 .. items.13 hold "Number 1" .. "Number 14"
     for idx in range(14):
         dataset.setValue('items.%d' % idx, escape('Number %d' % (idx + 1)))
     template = neo_cs.CS(dataset)
     template.parseFile('template.cs')
     return template.render()
コード例 #14
0
ファイル: basic.py プロジェクト: moyaproject/moya
 def render():
     """Build a demo HDF dataset and render template.cs against it,
     returning the expanded text."""
     hdf = neo_util.HDF()
     hdf.setValue("hdf.loadpaths.0", dirname)
     hdf.setValue("title", escape("Just a test"))
     hdf.setValue("user", escape("joe"))
     # items.0 .. items.13 hold the escaped strings "Number 1".."Number 14"
     for num in range(1, 15):
         hdf.setValue("items.%d" % (num - 1), escape("Number %d" % num))
     cs = neo_cs.CS(hdf)
     cs.parseFile("template.cs")
     return cs.render()
コード例 #15
0
 def init_request(self):
     """Import the ClearSilver modules and set up the per-request HDF
     dataset and incoming/outgoing cookie jars."""
     import neo_cgi
     # The following line is needed so that ClearSilver can be loaded when
     # we are being run in multiple interpreters under mod_python
     neo_cgi.update()
     import neo_cs
     import neo_util
     import Cookie
     self.hdf = neo_util.HDF()
     self.incookie = Cookie.SimpleCookie()
     self.outcookie = Cookie.SimpleCookie()
コード例 #16
0
 def ValidateTemplate(self):
     """Validate the 'tpl' request parameter as a ClearSilver template.

     Requires a positive integer 'stype' and a non-empty template;
     parse/render errors are reported back as a message, success
     returns None.
     NOTE(review): the except branch returns a tuple ("错误:", str(e)),
     not one concatenated string -- confirm the framework expects that.
     """
     stype = int(request.params['stype'])
     tpl = request.params['tpl']
     if stype <= 0 or not tpl:
         return '请求参数错误'
     hdf = neo_util.HDF()  # create an HDF dataset
     cs = neo_cs.CS(hdf)
     try:
         cs.parseStr(urllib2.unquote(str(tpl)))
         cs.render()
     except neo_util.ParseError, e:
         return "错误:", str(e)
コード例 #17
0
ファイル: bigtable.py プロジェクト: ui-frontend/moya
    def test_clearsilver():
        """ClearSilver"""
        hdf = neo_util.HDF()
        # flatten `table` into rows.<i>.cell.<j> leaves, HTML-escaped
        for i, row in enumerate(table):
            for j, c in enumerate(row.values()):
                hdf.setValue("rows.%d.cell.%d" % (i, j), cgi.escape(str(c)))

        # expand a nested each-loop template over the dataset
        cs = neo_cs.CS(hdf)
        cs.parseStr("""
<table><?cs
  each:row=rows
?><tr><?cs each:c=row.cell
  ?><td><?cs var:c ?></td><?cs /each
?></tr><?cs /each?>
</table>""")
        cs.render()
コード例 #18
0
ファイル: IncludeCSTemplate.py プロジェクト: kuj1pal/roswiki
def execute(macro, args):
    """MoinMoin macro: include a wiki page rendered as a ClearSilver
    template.

    *args* names the template page; key=value pairs taken from
    '##'-prefixed lines of the current page's body form the HDF
    dataset.  Returns the rendered, wiki-parsed body, or '' when the
    template page is missing or unreadable.
    """
    request = macro.request
    page_name = macro.formatter.page.page_name

    # get args
    include_page_name = ''
    if args is not None:
        include_page_name = args

    include_page_name = wikiutil.AbsPageName(page_name, include_page_name)

    include_page = Page(request, include_page_name)

    if include_page is None:
        return ''
    if not request.user.may.read(include_page_name):
        return ''

    cstemplate = include_page.getPageText()

    pagename = macro.formatter.page.page_name
    kv_page = Page(request, pagename)
    kv_body = kv_page.get_body()

    hdf = neo_util.HDF()
    # lines of the form "## key = value" populate the dataset
    for line in kv_body.split("\n"):
        if line.startswith("##"):
            line = line[2:].strip()
            parts = line.split("=", 1)
            if len(parts) == 2:
                val = parts[1].strip()
                #val = parts[1].strip().encode('utf-8')  # swh
                hdf.setValue(parts[0].strip(), val)

    # NOTE(review): the key has a trailing space ("WhiteSpaceStrip ") --
    # other call sites use the key without it; confirm which is intended.
    hdf.setValue("Config.WhiteSpaceStrip ", "0")

    cs = neo_cs.CS(hdf)
    cs.parseStr(cstemplate)

    body = cs.render()

    body = wikiutil.renderText(request, WikiParser, body)

    # debug dump; close the handle instead of leaking it
    with open("/tmp/out.html", "w") as dump:
        dump.write(body)

    return body
コード例 #19
0
ファイル: pywrapper_test.py プロジェクト: sschoen/clearsilver
 def testHdf(self):
     """Exercise basic HDF get/set, integer access, subtree traversal
     and node attributes."""
     hdf = neo_util.HDF()
     hdf.setValue("Foo", "bar")
     assert hdf.getValue("Foo", "baz") == "bar"
     # missing keys fall back to the supplied default
     assert hdf.getValue("Foo.1", "baz") == "baz"
     hdf.setValue("Numbers.1", "1")
     hdf.setValue("Numbers.2", "2")
     hdf.setValue("Numbers.3", "3")
     assert hdf.getIntValue("Numbers.2", -1) == 2
     assert hdf.getIntValue("Numbers.5", -1) == -1
     # getObj returns a subtree; lookups are then relative to it
     hdf_num = hdf.getObj("Numbers")
     assert hdf_num.getIntValue("2", -1) == 2
     assert hdf_num.name() == "Numbers"
     assert hdf_num.child().name() == "1"
     assert hdf_num.child().value() == "1"
     # attributes are reported in insertion order
     hdf.setAttr("Numbers", "type", "integers")
     hdf.setAttr("Numbers", "k", "v")
     assert hdf.getAttrs("Numbers") == [('type', 'integers'), ('k', 'v')]
     assert hdf_num.attrs() == [('type', 'integers'), ('k', 'v')]
コード例 #20
0
    def dump_hdf(self, directory, owner):
        """Write an HDF description of every documented function to
        <directory>/<basename-of-self._filename>.hdf.

        Functions with no documentation at all are skipped (with a
        warning unless QUIET).  The *owner* parameter is kept for API
        compatibility but is not used here.
        """
        global QUIET
        sys.path.insert(0, "../python")
        sys.path.insert(0, "python")
        import neo_cgi, neo_util
        hdf = neo_util.HDF()
        if not self._funcs.items(): return
        for name, f in self._funcs.items():
            if f._title is None and f._desc is None and f._args is None and f._retr is None:
                if not QUIET:
                    sys.stderr.write('-W- No info for function "%s()"\n' %
                                     name)
                continue
            if f._defn is None:
                if not QUIET:
                    sys.stderr.write('-W- No defn for function "%s()"\n' %
                                     name)
            hdf.setValue("Code.%s" % name, name)
            obj = hdf.getObj("Code.%s" % name)
            obj.setValue("Name", name)
            obj.setValue("filename", self._filename)
            if f._title: obj.setValue("Title", f._title)
            if f._defn: obj.setValue("Define", neo_cgi.text2html(f._defn))
            if f._args: obj.setValue("Args", neo_cgi.text2html(f._args))
            if f._desc: obj.setValue("Desc", neo_cgi.text2html(f._desc))
            if string.strip(f._other):
                obj.setValue("Other",
                             neo_cgi.text2html(string.strip(f._other)))
            if f._output: obj.setValue("Output", neo_cgi.text2html(f._output))
            # every known function is listed as "related"
            n = 0
            for func in self._funcs.keys():
                obj.setValue("related.%d" % n, func)
                n = n + 1

        # Strip directory and extension from the source filename.
        # Was string.rindex(): rindex raises ValueError when the
        # separator is absent, so the -1 checks below could never fire
        # and path-less / extension-less names crashed.  rfind returns
        # -1 as the checks expect.
        fname = self._filename
        x = string.rfind(fname, "/")
        if x != -1: fname = fname[x + 1:]
        x = string.rfind(fname, '.')
        if x != -1: fname = fname[:x]

        hdf.writeFile("%s/%s.hdf" % (directory, fname))
コード例 #21
0
  def getItemReferences(self, key):
    """Fetch the references of inventory item *key* from the remote API.

    Returns a dict mapping reference name -> reference value (either
    may be an empty string when the server omits it).
    """
    if self.loggedin == False:
      self.login()

    key = key.strip()

    url = self.site + "invent/api.py?Action.getItemReferences=1&key=%s" % (key,)
    fp = self.opener.open(url)
    body = fp.read()
    fp.close()

    import neo_cgi, neo_util
    import simple_hdfhelp as hdfhelp
    # the API responds with an HDF-formatted body
    hdf = neo_util.HDF()
    hdf.readString(body)

    ret = {}
    for k,o in hdfhelp.hdf_ko_iterator(hdf.getObj("CGI.cur.refs")):
      ret[o.getValue("name", "")] = o.getValue("reference", "")

    return ret
コード例 #22
0
ファイル: trans.py プロジェクト: sschoen/clearsilver
 def stringsHDF(self, prefix, locations, lang='en', exist=0, tiered=0):
     """Build an HDF dataset of strings for *lang* under *prefix*.

     *locations* is a sequence of rows with a string_id attribute (it
     is sorted in place here).  With exist=1 only MISSING translations
     are counted and logged; with tiered=1 keys are sharded into two
     id-derived levels so no single HDF node grows too large.
     """
     hdf = neo_util.HDF()
     if exist and lang == 'en': return hdf
     done = {}
     locations.sort()
     # index translations and source strings by numeric string_id
     maps = self.tdb.maps.fetchRows(('lang', lang))
     maps_d = {}
     for map in maps:
         maps_d[int(map.string_id)] = map
     strings = self.tdb.strings.fetchRows()
     strings_d = {}
     for string in strings:
         strings_d[int(string.string_id)] = string
     count = 0
     for loc in locations:
         s_id = int(loc.string_id)
         if done.has_key(s_id): continue
         try:
             s_row = maps_d[s_id]
             if exist: continue
         except KeyError:
             # no translation: fall back to the source string
             try:
                 s_row = strings_d[s_id]
             except KeyError:
                 log("Missing string_id %d, skipping" % s_id)
                 continue
         count = count + 1
         if tiered:
             hdf.setValue(
                 "%s.%d.%d.%s" % (prefix, int(s_id) / TIER1_DIV,
                                  int(s_id) / TIER2_DIV, s_id),
                 s_row.string)
         else:
             hdf.setValue("%s.%s" % (prefix, s_id), s_row.string)
         done[s_id] = 1
     if exist == 1: log("Missing %d strings for lang %s" % (count, lang))
     return hdf
コード例 #23
0
ファイル: mk_dhcpd.py プロジェクト: xuniuer/python
# Render dhcpd.conf.cst against the units/slots HDF data and print the
# resulting dhcpd configuration to stdout.
import neo_cgi
import neo_util
import neo_cs

hdf = neo_util.HDF()
# hdf.loadpaths tells ClearSilver where to look for referenced files
hdf.setValue("hdf.loadpaths.0", ".")
hdf.readFile("units.hdf")
hdf.readFile("slots.hdf")

cs = neo_cs.CS(hdf)
cs.parseFile("dhcpd.conf.cst")

print cs.render()
コード例 #24
0
def generate_po_files(hdf_file, po_dir, textDomain):
    """Regenerate gettext files for *textDomain* from a ClearSilver HDF
    language file.

    Rebuilds the .pot from every translatable HDF value, then for each
    language in ALL_LANGUAGES merges/creates the .po under
    <po_dir>/<lang>/LC_MESSAGES/, fills the default language's msgstrs
    from the msgids, drops empty fuzzy entries elsewhere, and compiles
    a .mo next to each .po.
    """
    ## prepare hdf
    if ((not os.path.isfile(hdf_file)) or (not os.access(hdf_file, os.R_OK))):
        sys.stderr.write("Unable to read the hdf file: %s\n" % hdf_file)
        return
    if not os.path.isdir(po_dir):
        os.mkdir(po_dir)
    pot_file = os.path.join(po_dir, "%s.pot" % textDomain)
    hdf = neo_util.HDF()
    hdf.readFile(hdf_file)
    ## update pot
    if not os.path.isfile(pot_file):
        sys.stdout.write("Creating: %s\n" % pot_file)
        pot = translate.storage.po.pofile(encoding="utf-8")
        pot.makeheader(pot_creation_date=True)
        pot.updateheader(add=True,
                         Project_Id_Version='ezmlm-web 3.3',
                         pot_creation_date=True,
                         language_team='Lars Kruse <%s>' % MAIL_ADDRESS,
                         Report_Msgid_Bugs_To=MAIL_ADDRESS,
                         encoding='utf-8',
                         Plural_Forms=['nplurals=2', 'plural=(n != 1)'])
        #TODO: somehow we need 'updateheaderplural'
    else:
        sys.stdout.write("Loading: %s\n" % pot_file)
        pot = translate.storage.po.pofile.parsefile(pot_file)
    ## remove all msgids - we will add them later
    pot.units = []

    ## add new entries
    def walk_hdf(prefix, node):
        # depth-first walk over HDF siblings/children, adding one pot
        # unit per translatable value
        def addPoItem(hdf_node):
            ## ignore hdf values with a "LINK" attribute
            for (key, value) in hdf_node.attrs():
                if key == "LINK":
                    return
            if not hdf_node.value():
                return
            item = pot.findunit(hdf_node.value())
            if not item:
                item = pot.addsourceunit(hdf_node.value())
                item.addlocation("%s%s" % (prefix, hdf_node.name()))

        while node:
            if node.name():
                new_prefix = prefix + node.name() + '.'
            else:
                new_prefix = prefix
            ## as the attribute feature of clearsilver does not work yet, we
            ## have to rely on magic names to prevent the translation of links
            if not (new_prefix.endswith(".Link.Rel.") \
              or new_prefix.endswith(".Link.Prot.") \
              or new_prefix.endswith(".Link.Abs.") \
              or new_prefix.endswith(".Link.Attr1.name.") \
              or new_prefix.endswith(".Link.Attr1.value.") \
              or new_prefix.endswith(".Link.Attr2.name.") \
              or new_prefix.endswith(".Link.Attr2.value.") \
              or new_prefix == "Lang.Name."):
                addPoItem(node)
            walk_hdf(new_prefix, node.child())
            node = node.next()

    walk_hdf("", hdf)
    pot.savefile(pot_file)
    ## create po files
    for ld in ALL_LANGUAGES:
        if not os.path.isdir(os.path.join(po_dir, ld)):
            os.mkdir(os.path.join(po_dir, ld))
        if not os.path.isdir(os.path.join(po_dir, ld, 'LC_MESSAGES')):
            os.mkdir(os.path.join(po_dir, ld, 'LC_MESSAGES'))
        po_file = os.path.join(po_dir, ld, 'LC_MESSAGES', "%s.po" % textDomain)
        if not os.path.isfile(po_file):
            translate.convert.pot2po.convertpot(file(pot_file),
                                                file(po_file, 'w'), None)
        else:
            # merge the fresh pot into the existing po via a temp file
            po2_file = po_file + '.new'
            translate.convert.pot2po.convertpot(file(pot_file),
                                                file(po2_file, 'w'),
                                                file(po_file))
            os.rename(po2_file, po_file)
        if ld == DEFAULT_LANG:
            ## set every msgstr to the respective msgid
            po_data = translate.storage.po.pofile.parsefile(po_file)
            po_data.removeduplicates()
            for po_unit in po_data.units:
                po_unit.settarget(po_unit.getsource())
            po_data.savefile(po_file)
        else:
            po_data = translate.storage.po.pofile.parsefile(po_file)
            po_data.removeduplicates()
            ## go through all msgstr and remove empty ones
            for index in range(len(po_data.units) - 1, 0, -1):
                if po_data.units[index].isfuzzy() and \
                  (po_data.units[index].msgidlen() == 0):
                    po_data.units.remove(po_data.units[index])
            po_data.savefile(po_file)
        if USE_SVN:
            revert_if_unchanged(po_file)
        ## make it writeable for pootle
        os.chmod(po_file, 0666)
        ## compile po file
        mo_file = po_file[:-3] + '.mo'
        translate.tools.pocompile.convertmo(file(po_file), file(mo_file, 'w'),
                                            file(pot_file))
コード例 #25
0
def generate_translated_hdf_file(orig_hdf_file, po_dir, hdf_dir, textdomain,
                                 language, src_languages):
    """Produce <hdf_dir>/<language>.hdf by translating every value of
    *orig_hdf_file* through the gettext catalog of *textdomain*.

    The output file is only written when at least one value was
    translated; "Lang.Name" is set to the language's display name
    instead of being translated.
    """
    import gettext
    ## prepare original hdf
    if ((not os.path.isfile(orig_hdf_file))
            or (not os.access(orig_hdf_file, os.R_OK))):
        sys.stderr.write("Unable to read the hdf file: %s\n" % orig_hdf_file)
        return
    hdf = neo_util.HDF()
    hdf.readFile(orig_hdf_file)
    ## name of new hdf file
    new_hdf_file = os.path.join(hdf_dir, language + '.hdf')
    ## create translation object
    translator = gettext.translation(textdomain,
                                     localedir=po_dir,
                                     languages=src_languages)

    ## translate entries
    ## count the number of translated items - so we can decide later, if we
    ## want to create the language file
    def walk_hdf(prefix, node):
        translate_count = 0

        def addHdfItem(hdf_node):
            ## ignore hdf values with a "LINK" attribute
            for (key, value) in hdf_node.attrs():
                if key == "LINK":
                    return
            if not hdf_node.value():
                return
            # write back the translation when one exists, otherwise keep
            # the original text; the boolean result feeds the counter
            translated = translator.gettext(hdf_node.value())
            if translated:
                hdf.setValue("%s%s" % (prefix, hdf_node.name()), translated)
                return True
            else:
                hdf.setValue("%s%s" % (prefix, hdf_node.name()),
                             hdf_node.value())
                return False

        while node:
            if node.name():
                new_prefix = prefix + node.name() + '.'
            else:
                new_prefix = prefix
            ## as the attribute feature of clearsilver does not work yet, we
            ## have to rely on magic names to prevent the translation of links
            if (new_prefix.endswith(".Link.Rel.") \
              or new_prefix.endswith(".Link.Prot.") \
              or new_prefix.endswith(".Link.Abs.") \
              or new_prefix.endswith(".Link.Attr1.name.") \
              or new_prefix.endswith(".Link.Attr1.value.") \
              or new_prefix.endswith(".Link.Attr2.name.") \
              or new_prefix.endswith(".Link.Attr2.value.")):
                pass
            elif new_prefix == "Lang.Name.":
                # set the "Lang.Name" attribute properly
                # remove trailing dot
                new_prefix = new_prefix.strip(".")
                if language in LANGUAGE_NAMES:
                    hdf.setValue(new_prefix, LANGUAGE_NAMES[language])
                else:
                    hdf.setValue(new_prefix, language)
            else:
                if addHdfItem(node):
                    translate_count += 1
            translate_count += walk_hdf(new_prefix, node.child())
            node = node.next()
        return translate_count

    translated_items_count = walk_hdf("", hdf)
    ## if there was at least one valid translation, then we should write
    ## the language file
    if translated_items_count > 0:
        print "Writing translation: %s" % language
        hdf.writeFile(new_hdf_file)
    else:
        print "Skipping empty translation: %s" % language
コード例 #26
0
ファイル: hdfhelp.py プロジェクト: sschoen/clearsilver
def test():
    """Smoke test: eval_cs must expand var:foo from the dataset."""
    import neo_util
    hdf = neo_util.HDF()
    hdf.setValue("foo","1")
    print eval_cs(hdf,"this should say 1  ===> <?cs var:foo ?>")
コード例 #27
0
ファイル: trans.py プロジェクト: sschoen/clearsilver
class Translator:
    """Extracts translatable strings from ClearSilver/HTML/HDF template
    trees, stores them in a translation database, and writes translated
    copies of the files back out (see loadStrings/dumpFiles)."""
    # Shared regexes, compiled lazily on first instantiation (in __init__).
    _HTML_TAG_RE = None
    _HTML_TAG_REGEX = '<[^!][^>]*?>'  # any HTML tag that is not a <!-- comment
    _HTML_CMT_RE = None
    _HTML_CMT_REGEX = '<!--.*?-->'  # HTML comment
    _CS_TAG_RE = None
    _CS_TAG_REGEX = '<\\?.+?\\?>'  # ClearSilver <? ... ?> tag

    def __init__(self):
        self.tdb = db_trans.trans_connect()

        # configuration data ......
        #  - we should stop hardcoding this... - jeske

        self.root = "testroot"
        self.languages = ['es', 'en']

        self.ignore_paths = ['tmpl/m']  # common place for mockups
        self.ignore_files = ['blah_ignore.cs']  # ignore clearsilver file

        # ignore clearsilver javascript files
        self.ignore_patterns = ['tmpl/[^ ]*_js.cs']

        # ............................

        if self.root is None:
            raise "Unable to determine installation root"

        if Translator._HTML_TAG_RE is None:
            Translator._HTML_TAG_RE = re.compile(Translator._HTML_TAG_REGEX,
                                                 re.MULTILINE | re.DOTALL)
        if Translator._HTML_CMT_RE is None:
            Translator._HTML_CMT_RE = re.compile(Translator._HTML_CMT_REGEX,
                                                 re.MULTILINE | re.DOTALL)
        if Translator._CS_TAG_RE is None:
            Translator._CS_TAG_RE = re.compile(Translator._CS_TAG_REGEX,
                                               re.MULTILINE | re.DOTALL)

        self._html_state = 0

    def parseHTMLTag(self, data):
        """Extract translatable attribute values from one HTML tag body.

        *data* is the tag contents without the surrounding angle brackets.
        Returns a list of (string, offset, 1) tuples with offsets relative
        to *data*; currently only the value attribute of submit/button
        <input> tags is treated as translatable.
        """
        # this is only called if we see a full tag in one parse...
        i = 0
        if len(data) == 0: return []
        # closing tags ("/...") and processing instructions ("?...")
        # carry no user-visible text
        if data[0] in '/?': return []
        # scan past the tag name (up to whitespace or '>')
        while i < len(data) and data[i] not in ' \n\r\t>':
            i = i + 1
        if i == len(data): return []
        tag = data[:i].lower()
        #print "Searching tag: %s" % data
        #print "Found tag: %s" % tag
        results = []
        # name(=value)? attribute matcher; values may be single-quoted,
        # double-quoted, or bare
        attrfind = re.compile(r'\s*([a-zA-Z_][-.a-zA-Z_0-9]*)(\s*=\s*'
                              r'(\'[^\']*\'|"[^"]*"|[^ \t\n<>]*))?')
        k = i
        attrs = {}  # attribute name -> (unquoted) value
        attrs_beg = {}  # attribute name -> offset of the value match in data
        while k < len(data):
            match = attrfind.match(data, k)
            if not match: break
            attrname, rest, attrvalue = match.group(1, 2, 3)
            if not rest:
                # valueless attribute (e.g. "checked"): value defaults to name
                attrvalue = attrname
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            attrname = attrname.lower()
            if attrs.has_key(attrname):
                log("Can't handle duplicate attrs: %s" % attrname)
            attrs[attrname] = attrvalue
            attrs_beg[attrname] = match.start(3)
            k = match.end(0)

        find_l = []
        # only submit/button inputs have user-visible value text
        if tag == "input":
            if attrs.get('type', "").lower() in ["submit", "button"]:
                find_l.append((attrs.get('value',
                                         ''), attrs_beg.get('value', 0)))

        for s, k in find_l:
            if s:
                # re-locate the (possibly quoted) value to get its exact
                # character offset within data
                x = data[k:].find(s)
                if x != -1: results.append((s, x + k, 1))

        return results

    def parseHTML(self, data, reset=1):
        """Extract translatable text runs from HTML *data*.

        Returns a list of (string, offset, 1) tuples.  With reset=0 the
        parser resumes from the state left by the previous call (used for
        the text fragments between ClearSilver tags): _html_state == 1
        means we stopped inside an unterminated tag, 2 inside an
        unterminated comment.
        """
        if reset: self._html_state = 0
        if DEBUG:
            print "- %d ---------\n%s\n- E ---------" % (self._html_state,
                                                         data)

        results = []
        i = 0
        n = len(data)
        # if we had state from the last parse... find it
        if self._html_state:
            if self._html_state == 2:
                x = string.find(data[i:], '-->')
                l = 3
            else:
                x = string.find(data[i:], '>')
                l = 1
            if x == -1: return results
            # skip past the closing delimiter of the carried-over tag/comment
            i = i + x + l
            self._html_state = 0
        while i < n:
            if DEBUG: print "MATCHING>%s<MATCHING" % data[i:]
            # locate the next comment and tag delimiters (relative to i)
            cmt_b = string.find(data[i:], '<!--')
            cmt_e = string.find(data[i:], '-->')
            tag_b = string.find(data[i:], '<')
            tag_e = string.find(data[i:], '>')
            if DEBUG: print "B> %d %d %d %d <B" % (cmt_b, cmt_e, tag_b, tag_e)
            if cmt_b != -1 and cmt_b <= tag_b:
                # text before a comment: trim surrounding whitespace and keep
                x = i
                y = i + cmt_b - 1
                while x < y and data[x] in string.whitespace:
                    x += 1
                while y > x and data[y] in string.whitespace:
                    y -= 1
                results.append((data[x:y + 1], x, 1))
                if cmt_e == -1:  # partial comment:
                    self._html_state = 2
                    break
                i = i + cmt_e + 3
            elif tag_b != -1:
                # text before a tag: trim whitespace, keep, then descend
                # into the tag body for attribute strings
                x = i
                y = i + tag_b - 1
                while x < y and data[x] in string.whitespace:
                    x += 1
                while y > x and data[y] in string.whitespace:
                    y -= 1
                results.append((data[x:y + 1], x, 1))
                if tag_e == -1:  # partial tag
                    self._html_state = 1
                    break
                h_results = self.parseHTMLTag(data[i + tag_b + 1:i + tag_e])
                # rebase tag-relative offsets onto the full data string
                h_results = map(lambda x: (x[0], x[1] + i + tag_b + 1, x[2]),
                                h_results)
                results = results + h_results
                i = i + tag_e + 1
            else:
                # trailing text with no more markup
                x = i
                y = n - 1
                while x < y and data[x] in string.whitespace:
                    x += 1
                while y > x and data[y] in string.whitespace:
                    y -= 1
                results.append((data[x:y + 1], x, 1))
                break
        return results

    def parseCS(self, data):
        """Split ClearSilver template *data* into translatable chunks.

        Strips <?cs ... ?> tags and runs the text between them through
        parseHTML().  Returns a list of (string, offset, ishtml) tuples
        with offsets relative to the start of *data*.
        """
        results = []
        i = 0
        n = len(data)
        while i < n:
            m = Translator._CS_TAG_RE.search(data, i)
            if not m:
                # no complete tag left; check for a partial "<?" opener
                x = string.find(data[i:], '<?')
                if x == -1:
                    results.append((data[i:], i))
                else:
                    # BUGFIX: x is relative to data[i:], so the slice must
                    # end at i + x -- the old data[i:x] returned the wrong
                    # (usually empty) text whenever i > 0.
                    results.append((data[i:i + x], i))
                break
            (b, e) = m.span()
            if i != b: results.append((data[i:b], i))
            i = e
        t_results = []
        self._html_in = 0
        for (s, ofs) in results:
            # parse each inter-tag fragment as HTML, carrying tag/comment
            # state across fragments (reset=0)
            r = self.parseHTML(s, reset=0)
            # rebase fragment-relative offsets onto the full template
            r = map(lambda x: (x[0], x[1] + ofs, x[2]), r)
            t_results = t_results + r
        return t_results

    def descendHDF(self, obj, prefix):
        """Recursively collect translatable values from an HDF subtree.

        Only nodes carrying a "Lang" attribute are collected.  Returns a
        list of (value, dotted.hdf.name, 0) tuples.
        """
        found = []
        node = obj
        while node is not None:
            if node.value():
                attr_names = map(lambda a: a[0], node.attrs())
                if "Lang" in attr_names:
                    if prefix:
                        found.append(
                            (node.value(), "%s.%s" % (prefix, node.name()), 0))
                    else:
                        found.append((node.value(), "%s" % (node.name()), 0))
            if node.child():
                if prefix:
                    child_prefix = "%s.%s" % (prefix, node.name())
                else:
                    child_prefix = node.name()
                found = found + self.descendHDF(node.child(), child_prefix)
            node = node.next()
        return found

    def parseHDF(self, data):
        """Extract translatable strings from an HDF file body.

        HDF files are handled specially: only whole HDF values that carry
        the Lang attribute are extracted (see descendHDF).
        """
        tree = neo_util.HDF()
        tree.readString(data, 1)
        return self.descendHDF(tree, "")

    def handleFile(self, file):
        if file in self.ignore_files: return []
        for a_re in self.ignore_patterns:
            if re.match(a_re, file):
                return []
        fpath = self.root + '/' + file
        x = string.rfind(file, '.')
        if x == -1: return []
        data = open(fpath, 'r').read()
        ext = file[x:]
        strings = []
        if ext in ['.cst', '.cs']:
            strings = self.parseCS(data)
        elif ext in ['.html', '.htm']:
            strings = self.parseHTML(data)
        elif ext in ['.hdf']:
            strings = self.parseHDF(data)
        if len(strings):
            print "Found %d strings in %s" % (len(strings), file)
            return strings
        return []

    def walkDirectory(self, path):
        """Recursively gather strings from every file under *path*.

        Returns a list of (relative_filename, strings) pairs.  Ignored
        paths and "release" directories are skipped.
        """
        if path in self.ignore_paths: return []
        full_path = self.root + '/' + path
        subdirs = []
        results = []
        for entry in os.listdir(full_path):
            if entry[0] == '.': continue
            entry_path = full_path + '/' + entry
            if os.path.isdir(entry_path):
                subdirs.append(entry)
            else:
                found = self.handleFile(path + '/' + entry)
                if len(found):
                    results.append((path + '/' + entry, found))
        for subdir in subdirs:
            if subdir not in ["release"]:
                results = results + self.walkDirectory(path + '/' + subdir)
        return results

    def cleanHtmlString(self, s):
        """Collapse whitespace runs to single spaces and trim both ends."""
        collapsed = re.sub("\s+", " ", s)
        return string.strip(collapsed)

    def containsWords(self, s, ishtml):
        """Return 1 if *s* contains at least one ASCII letter or digit.

        When *ishtml* is true, a handful of common HTML entities are
        decoded first so entity-only strings do not count as words.
        """
        if ishtml:
            s = string.replace(s, '&nbsp;', ' ')
            s = string.replace(s, '&quot;', '"')
            s = string.replace(s, '&copy;', '')
            s = string.replace(s, '&lt;', '<')
            s = string.replace(s, '&gt;', '>')
            s = string.replace(s, '&amp;', '&')
        for ch in s:
            code = ord(ch)
            # 0-9, A-Z, a-z (ASCII ranges only, by design)
            if 47 < code < 58 or 64 < code < 91 or 96 < code < 123:
                return 1
        return 0

    def findString(self, s):
        rows = self.tdb.strings.fetchRows(('string', s))
        if len(rows) == 0:
            row = self.tdb.strings.newRow()
            row.string = s
            row.save()
            return row.string_id
        elif len(rows) > 1:
            raise eTransError, "String %s exists multiple times!" % s
        else:
            return rows[0].string_id

    def loadStrings(self, one_file=None, verbose=0):
        """Harvest strings from the template tree and store them.

        With one_file set, only that file is scanned; otherwise the whole
        'tmpl' tree is walked.  Writes a gettext-style "map" file as a
        side effect and returns a list of TransLoc location records.
        """
        if one_file is not None:
            strings = self.handleFile(one_file)
            results = [(one_file, strings)]
        else:
            results = self.walkDirectory('tmpl')
        uniq = {}  # cleaned string -> [(fname, offset-or-hdf-name, orig_len)]
        cnt = 0
        seen_hdf = {}  # hdf name -> (string, fname), for duplicate detection
        for fname, strings in results:
            for (s, ofs, ishtml) in strings:
                if s and string.strip(s):
                    # keep the ORIGINAL length: offsets refer to the raw file
                    l = len(s)
                    if ishtml:
                        s = self.cleanHtmlString(s)
                    if self.containsWords(s, ishtml):
                        if type(ofs) == type(""):  # HDF
                            # HDF entries are keyed by name; warn when the
                            # same name maps to different text
                            if seen_hdf.has_key(ofs):
                                if seen_hdf[ofs][0] != s:
                                    log("Duplicate HDF Name %s:\n\t file %s = %s\n\t file %s = %s"
                                        % (ofs, seen_hdf[ofs][1],
                                           seen_hdf[ofs][0], fname, s))
                            else:
                                seen_hdf[ofs] = (s, fname)
                        try:
                            uniq[s].append((fname, ofs, l))
                        except KeyError:
                            uniq[s] = [(fname, ofs, l)]
                        cnt = cnt + 1
        print "%d strings, %d unique" % (cnt, len(uniq.keys()))
        # NOTE(review): fp is never closed explicitly; relies on refcount GC
        fp = open("map", 'w')
        for (s, locs) in uniq.items():
            locs = map(lambda x: "%s:%s:%d" % x, locs)
            fp.write('#: %s\n' % (string.join(locs, ',')))
            fp.write('msgid=%s\n\n' % repr(s))

        log("Loading strings/locations into database")
        locations = []
        for (s, locs) in uniq.items():
            s_id = self.findString(s)
            for (fname, ofs, l) in locs:
                if type(ofs) == type(""):  # ie, its HDF
                    location = "hdf:%s" % ofs
                else:
                    location = "ofs:%d:%d" % (ofs, l)
                loc_r = TransLoc(s_id, fname, location)
                locations.append(loc_r)
        return locations

    def stringsHDF(self, prefix, locations, lang='en', exist=0, tiered=0):
        """Build an HDF tree of strings for *lang* under *prefix*.

        locations -- TransLoc rows naming the strings to dump
        exist     -- when 1, emit only strings MISSING a translation
                     (returns empty for 'en', the source language)
        tiered    -- when 1, bucket names as prefix.<id/T1>.<id/T2>.<id>
                     so no single HDF level grows unboundedly
        """
        hdf = neo_util.HDF()
        if exist and lang == 'en': return hdf
        done = {}
        locations.sort()
        # translation rows for this language, indexed by string id
        maps = self.tdb.maps.fetchRows(('lang', lang))
        maps_d = {}
        for map_row in maps:
            maps_d[int(map_row.string_id)] = map_row
        # FIX: the loop variable used to be named "string"/"map", shadowing
        # the stdlib string module and the map() builtin; renamed.
        strings = self.tdb.strings.fetchRows()
        strings_d = {}
        for string_row in strings:
            strings_d[int(string_row.string_id)] = string_row
        count = 0
        for loc in locations:
            s_id = int(loc.string_id)
            if done.has_key(s_id): continue
            try:
                s_row = maps_d[s_id]
                if exist: continue  # translation exists; not "missing"
            except KeyError:
                # no translation: fall back to the source-language row
                try:
                    s_row = strings_d[s_id]
                except KeyError:
                    log("Missing string_id %d, skipping" % s_id)
                    continue
            count = count + 1
            if tiered:
                hdf.setValue(
                    "%s.%d.%d.%s" % (prefix, int(s_id) / TIER1_DIV,
                                     int(s_id) / TIER2_DIV, s_id),
                    s_row.string)
            else:
                hdf.setValue("%s.%s" % (prefix, s_id), s_row.string)
            done[s_id] = 1
        if exist == 1: log("Missing %d strings for lang %s" % (count, lang))
        return hdf

    def dumpStrings(self, locations, lang=None):
        """Write strings_<lang>.hdf (and strings_missing_<lang>.hdf) files.

        When *lang* is None, every language present in nt_trans_maps is
        dumped, with 'en' always included first.
        """
        log("Dumping strings to HDF")
        if lang is not None:
            langs = [lang]
        else:
            langs = ['en']
            cursor = self.tdb.defaultCursor()
            cursor.execute("select lang from nt_trans_maps group by lang")
            for row in cursor.fetchall():
                langs.append(row[0])

        for code in langs:
            full = self.stringsHDF('S', locations, code)
            full.writeFile("strings_%s.hdf" % code)

        for code in langs:
            missing = self.stringsHDF('S', locations, code, exist=1)
            if missing.child():
                missing.writeFile("strings_missing_%s.hdf" % code)

    def fetchString(self, s_id, lang):
        """Return the text for string *s_id* in language *lang*.

        lang == "hdf" yields a ClearSilver var reference into the tiered
        Lang.Extracted tree instead of literal text.  Falls back to the
        English source string when no translation row exists; raises
        eNoString when the id is unknown entirely.
        """
        if lang == "hdf":
            return "<?cs var:Lang.Extracted.%d.%d.%s ?>" % (
                int(s_id) / TIER1_DIV, int(s_id) / TIER2_DIV, s_id)
        translated = self.tdb.maps.fetchRows([('string_id', s_id),
                                              ('lang', lang)])
        if translated:
            return translated[0].string
        try:
            base = self.tdb.strings.fetchRow(('string_id', s_id))
        except odb.eNoMatchingRows:
            log("Unable to find string id %s" % s_id)
            raise eNoString
        if lang != 'en':
            log("Untranslated string for id %s" % s_id)
        return base.string

    def dumpFiles(self, locations, lang):
        """Write translated copies of every template under root/gen/.

        For non-HDF files the stored byte offsets are replayed against
        the original text, splicing in fetchString() output.  HDF entries
        are collected into hdf_map and written as lang_map.hdf files.
        lang == "hdf" emits ClearSilver var references and dumps the
        per-language lang_<lang>.hdf trees for all configured languages.
        """
        log("Dumping files for %s" % lang)
        # group location rows by filename
        files = {}
        for row in locations:
            try:
                files[row.filename].append(row)
            except KeyError:
                files[row.filename] = [row]

        hdf_map = []

        # NOTE(review): shells out with an interpolated path; self.root is
        # a trusted config value here, but this would be worth hardening.
        os.system("rm -rf %s/gen/tmpl" % (self.root))
        for file in files.keys():
            fname = "%s/gen/%s" % (self.root, file)
            try:
                os.makedirs(os.path.dirname(fname))
            except OSError, reason:
                # errno 17 == EEXIST: directory already there is fine
                if reason[0] != 17:
                    raise
            do_hdf = 0
            x = string.rfind(file, '.')
            if x != -1 and file[x:] == '.hdf':
                do_hdf = 1
            ofs = []
            # split stored locations into byte offsets vs. HDF names
            for loc in files[file]:
                parts = string.split(loc.location, ':')
                if len(parts) == 3 and parts[0] == 'ofs' and do_hdf == 0:
                    ofs.append((int(parts[1]), int(parts[2]), loc.string_id))
                elif len(parts) == 2 and parts[0] == 'hdf' and do_hdf == 1:
                    hdf_map.append((parts[1], loc.string_id))
                else:
                    log("Invalid location for loc_id %s" % loc.loc_id)
                    continue
            if not do_hdf:
                ofs.sort()
                data = open(self.root + '/' + file).read()
                # ok, now we split up the original data into sections
                x = 0
                n = len(data)
                out = []
                #sys.stderr.write("%s\n" % repr(ofs))
                # walk the sorted offsets, copying untouched spans and
                # substituting the translation at each recorded offset
                while len(ofs):
                    if ofs[0][0] > x:
                        out.append(data[x:ofs[0][0]])
                        x = ofs[0][0]
                    elif ofs[0][0] == x:
                        out.append(self.fetchString(ofs[0][2], lang))
                        x = ofs[0][0] + ofs[0][1]
                        ofs = ofs[1:]
                    else:
                        # offset points behind the cursor: stale/overlapping
                        # location data; bail out of this file
                        # NOTE(review): data[x:20] / data[ofs[0][0]:20] look
                        # like they were meant to be 20-char windows
                        log("How did we get here? %s x=%d ofs=%d sid=%d" %
                            (file, x, ofs[0][0], ofs[0][2]))
                        log("Data[x:20]: %s" % data[x:20])
                        log("Data[ofs:20]: %s" % data[ofs[0][0]:20])
                        break
                if n > x:
                    out.append(data[x:])
                odata = string.join(out, '')
                open(fname, 'w').write(odata)

        if lang == "hdf":
            langs = self.languages
        else:
            langs = [lang]

        for d_lang in langs:
            # dumping the extracted strings
            hdf = self.stringsHDF('Lang.Extracted',
                                  locations,
                                  d_lang,
                                  tiered=1)
            fname = "%s/gen/tmpl/lang_%s.hdf" % (self.root, d_lang)
            hdf.writeFile(fname)
            data = open(fname).read()
            fp = open(fname, 'w')
            fp.write('## AUTOMATICALLY GENERATED -- DO NOT EDIT\n\n')
            fp.write(data)
            fp.write('\n#include "lang_map.hdf"\n')

            # dumping the hdf strings file
            if d_lang == "en":
                map_file = "%s/gen/tmpl/lang_map.hdf" % (self.root)
            else:
                map_file = "%s/gen/tmpl/%s/lang_map.hdf" % (self.root, d_lang)
            try:
                os.makedirs(os.path.dirname(map_file))
            except OSError, reason:
                if reason[0] != 17: raise
            map_hdf = neo_util.HDF()
            # NOTE(review): 'str' shadows the builtin here
            for (name, s_id) in hdf_map:
                str = hdf.getValue(
                    'Lang.Extracted.%d.%d.%s' %
                    (int(s_id) / TIER1_DIV, int(s_id) / TIER2_DIV, s_id), '')
                map_hdf.setValue(name, str)
            map_hdf.writeFile(map_file)
コード例 #28
0
ファイル: trans.py プロジェクト: sschoen/clearsilver
 def parseHDF(self, data):
     """Extract translatable strings from an HDF file body (duplicate of
     the Translator.parseHDF example above, scraped at odd indentation)."""
     # Ok, we handle HDF files specially.. the theory is, we only
     # extract entire HDF elements which have the attribute Lang
     hdf = neo_util.HDF()
     hdf.readString(data, 1)
     return self.descendHDF(hdf, "")