Example #1
    # slurp through all the files
    eater = TokenEater(options)
    for filename in args:
        if filename == '-':
            if options.verbose:
                print ('Reading standard input')
            fp = sys.stdin
            closep = 0
        else:
            if options.verbose:
                print('Working on %s' % filename)
            if filename.endswith('.html'):
                from uliweb.core.template import template_file_py
                from cStringIO import StringIO
                text = template_file_py(filename, skip_extern=True)
                fp = StringIO(text)
                closep = 0
            else:
                fp = open(filename)
                closep = 1
        try:
            eater.set_filename(filename)
            try:
                tokenize.tokenize(fp.readline, eater)
            except tokenize.TokenError, e:
                print >> sys.stderr, '%s: %s, line %d, column %d' % (
                    e[0], filename, e[1][0], e[1][1])
        finally:
            if closep:
                fp.close()
Example #2
def extrace_files(files, outputfile, opts=None, vars=None):
    global _py_ext
    import logging

    log = logging.getLogger('pygettext')
    
    opts = opts or {}
    vars = vars or {}
    
    _py_ext = ['.py', '.ini', '.html']
    class Options:
        # constants
        GNU = 1
        SOLARIS = 2
        # defaults
        extractall = 0 # FIXME: currently this option has no effect at all.
        escape = 0
        keywords = ['_', 'gettext', 'ngettext', 'ungettext', 'ugettext']
        outpath = ''
        outfile = outputfile
        writelocations = 1
        locationstyle = GNU
        verbose = 0
        width = 78
        excludefilename = ''
        docstrings = 0
        nodocstrings = {}
        toexclude = []
    
    options = Options()

    #    make_escapes(options.escape)
    options.keywords.extend(default_keywords)
    for k, v in opts.items():
        if v and hasattr(options, k):
            _v = getattr(options, k)
            if isinstance(_v, list):
                _v.extend(v)
            elif isinstance(_v, dict):
                _v.update(v)
            else:
                setattr(options, k, v)
    
    if not isinstance(files, list):
        files = getFilesForName(files)
    eater = TokenEater(options, vars=vars)
    for filename in files:
        if options.verbose:
            print('Working on %s' % filename)
        if not os.path.exists(filename):
            continue
        if filename.endswith('.html'):
            from uliweb.core import template
            from cStringIO import StringIO
            from uliweb.core.template import template_file_py
            text = template_file_py(filename, skip_extern=True, log=log)
            fp = StringIO(text)
            closep = 0
        else:
            fp = open(filename)
            closep = 1
        
        try:
            eater.set_filename(filename)
            try:
                tokenize.tokenize(fp.readline, eater)
            except tokenize.TokenError, e:
                print >> sys.stderr, '%s: %s, line %d, column %d' % (
                    e[0], filename, e[1][0], e[1][1])
        finally:
            if closep:
                fp.close()
    
    if options.outfile == '-':
        fp = sys.stdout
        closep = 0
    else:
        if options.outpath:
            options.outfile = os.path.join(options.outpath, options.outfile)
        path = os.path.dirname(options.outfile)
        if path:
            if not os.path.exists(path):
                try:
                    os.makedirs(path)
                except:
                    pass
        fp = open(options.outfile, 'w')
        closep = 1
    try:
        eater.write(fp)
    finally:
        if closep:
            fp.close()
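
For reference, here is a minimal usage sketch of extrace_files as defined above, assuming the module is importable as pygettext and that TokenEater, default_keywords and getFilesForName are defined alongside it; the input paths and the _T keyword are hypothetical:

from pygettext import extrace_files  # assumed module name

# Extract translatable strings from a mix of .py and .html sources into one
# .pot catalog. Keys in opts that match Options attributes are merged:
# lists are extended, dicts are updated, scalar defaults are overridden.
extrace_files(
    ['app/views.py', 'app/templates/index.html'],  # hypothetical input paths
    'messages.pot',
    opts={'verbose': 1, 'outpath': 'locale', 'keywords': ['_T']},
)
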
Example #3
File: pygettext.py  Project: 28sui/uliweb
    # slurp through all the files
    eater = TokenEater(options)
    for filename in args:
        if filename == '-':
            if options.verbose:
                print ('Reading standard input')
            fp = sys.stdin
            closep = 0
        else:
            if options.verbose:
                print('Working on %s' % filename)
            if filename.endswith('.html'):
                from uliweb.core.template import template_file_py
                from cStringIO import StringIO
                text = template_file_py(filename, skip_extern=True, multilines=True)
                fp = StringIO(text)
                closep = 0
            else:
                fp = open(filename)
                closep = 1
        try:
            eater.set_filename(filename)
            try:
                tokenize.tokenize(fp.readline, eater)
            except tokenize.TokenError, e:
                print >> sys.stderr, '%s: %s, line %d, column %d' % (
                    e[0], filename, e[1][0], e[1][1])
        finally:
            if closep:
                fp.close()
Example #4
def extrace_files(files, outputfile, opts=None, vars=None):
    global _py_ext
    import logging

    log = logging.getLogger('pygettext')

    opts = opts or {}
    vars = vars or {}

    _py_ext = ['.py', '.ini', '.html']

    class Options:
        # constants
        GNU = 1
        SOLARIS = 2
        # defaults
        extractall = 0  # FIXME: currently this option has no effect at all.
        escape = 0
        keywords = ['_', 'gettext', 'ngettext', 'ungettext', 'ugettext']
        outpath = ''
        outfile = outputfile
        writelocations = 1
        locationstyle = GNU
        verbose = 0
        width = 78
        excludefilename = ''
        docstrings = 0
        nodocstrings = {}
        toexclude = []

    options = Options()

    #    make_escapes(options.escape)
    options.keywords.extend(default_keywords)
    for k, v in opts.items():
        if v and hasattr(options, k):
            _v = getattr(options, k)
            if isinstance(_v, list):
                _v.extend(v)
            elif isinstance(_v, dict):
                _v.update(v)
            else:
                setattr(options, k, v)

    if not isinstance(files, list):
        files = getFilesForName(files)
    eater = TokenEater(options, vars=vars)
    for filename in files:
        if options.verbose:
            print('Working on %s' % filename)
        if not os.path.exists(filename):
            continue
        if filename.endswith('.html'):
            from uliweb.core import template
            from cStringIO import StringIO
            from uliweb.core.template import template_file_py
            text = template_file_py(filename,
                                    skip_extern=True,
                                    log=log,
                                    multilines=True)
            fp = StringIO(text)
            closep = 0
        else:
            fp = open(filename)
            closep = 1

        try:
            eater.set_filename(filename)
            try:
                tokenize.tokenize(fp.readline, eater)
            except tokenize.TokenError, e:
                print >> sys.stderr, '%s: %s, line %d, column %d' % (
                    e[0], filename, e[1][0], e[1][1])
        finally:
            if closep:
                fp.close()

    if options.outfile == '-':
        fp = sys.stdout
        closep = 0
    else:
        if options.outpath:
            options.outfile = os.path.join(options.outpath, options.outfile)
        path = os.path.dirname(options.outfile)
        if path:
            if not os.path.exists(path):
                try:
                    os.makedirs(path)
                except:
                    pass
        fp = open(options.outfile, 'w')
        closep = 1
    try:
        eater.write(fp)
    finally:
        if closep:
            fp.close()
Example #5
 # slurp through all the files
 eater = TokenEater(options)
 for filename in args:
     if filename == '-':
         if options.verbose:
             print('Reading standard input')
         fp = sys.stdin
         closep = 0
     else:
         if options.verbose:
             print('Working on %s' % filename)
         if filename.endswith('.html'):
             from uliweb.core.template import template_file_py
             from cStringIO import StringIO
             text = template_file_py(filename,
                                     skip_extern=True,
                                     multilines=True)
             fp = StringIO(text)
             closep = 0
         else:
             fp = open(filename)
             closep = 1
     try:
         eater.set_filename(filename)
         try:
             tokenize.tokenize(fp.readline, eater)
         except tokenize.TokenError, e:
             print >> sys.stderr, '%s: %s, line %d, column %d' % (
                 e[0], filename, e[1][0], e[1][1])
     finally:
         if closep:
             fp.close()
Example #6
def main():
    global default_keywords
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'ad:DEhk:Kno:p:S:Vvw:x:X:f:',
                                   [
                                       'extract-all',
                                       'default-domain=',
                                       'escape',
                                       'help',
                                       'keyword=',
                                       'no-default-keywords',
                                       'add-location',
                                       'no-location',
                                       'output=',
                                       'output-dir=',
                                       'style=',
                                       'verbose',
                                       'version',
                                       'width=',
                                       'exclude-file=',
                                       'docstrings',
                                       'no-docstrings',
                                   ])
    except getopt.error as msg:
        usage(1, msg)

    # for holding option values
    class Options:
        # constants
        GNU = 1
        SOLARIS = 2
        # defaults
        extractall = 0  # FIXME: currently this option has no effect at all.
        escape = 0
        keywords = ['ugettext', 'ungettext']
        outpath = ''
        outfile = 'messages.pot'
        writelocations = 1
        locationstyle = GNU
        verbose = 0
        width = 78
        excludefilename = ''
        docstrings = 0
        nodocstrings = {}

    options = Options()
    locations = {
        'gnu': options.GNU,
        'solaris': options.SOLARIS,
    }

    files = ''

    # parse options
    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-a', '--extract-all'):
            options.extractall = 1
        elif opt in ('-d', '--default-domain'):
            options.outfile = arg + '.pot'
        elif opt in ('-E', '--escape'):
            options.escape = 1
        elif opt in ('-D', '--docstrings'):
            options.docstrings = 1
        elif opt in ('-k', '--keyword'):
            options.keywords.append(arg)
        elif opt in ('-K', '--no-default-keywords'):
            default_keywords = []
        elif opt in ('-n', '--add-location'):
            options.writelocations = 1
        elif opt in ('--no-location', ):
            options.writelocations = 0
        elif opt in ('-S', '--style'):
            options.locationstyle = locations.get(arg.lower())
            if options.locationstyle is None:
                usage(1, ('Invalid value for --style: %s') % arg)
        elif opt in ('-o', '--output'):
            options.outfile = arg
        elif opt in ('-p', '--output-dir'):
            options.outpath = arg
        elif opt in ('-v', '--verbose'):
            options.verbose = 1
        elif opt in ('-V', '--version'):
            print(('pygettext.py (xgettext for Python) %s') % __version__)
            sys.exit(0)
        elif opt in ('-w', '--width'):
            try:
                options.width = int(arg)
            except ValueError:
                usage(1, ('--width argument must be an integer: %s') % arg)
        elif opt in ('-x', '--exclude-file'):
            options.excludefilename = arg
        elif opt in ('-X', '--no-docstrings'):
            fp = open(arg)
            try:
                while 1:
                    line = fp.readline()
                    if not line:
                        break
                    options.nodocstrings[line[:-1]] = 1
            finally:
                fp.close()
        elif opt == '-f':
            files = arg

    # calculate escapes
    #    make_escapes(options.escape)

    # calculate all keywords
    options.keywords.extend(default_keywords)

    # initialize list of strings to exclude
    if options.excludefilename:
        try:
            fp = open(options.excludefilename)
            options.toexclude = fp.readlines()
            fp.close()
        except IOError:
            print(("Can't read --exclude-file: %s") % options.excludefilename,
                  file=sys.stderr)
            sys.exit(1)
    else:
        options.toexclude = []

    # resolve args to module lists
    expanded = []
    for arg in args:
        if arg == '-':
            expanded.append(arg)
        else:
            expanded.extend(getFilesForName(arg))
    args = expanded

    if files:
        lines = open(files).readlines()
        for line in lines:
            args.append(line.strip())

    # slurp through all the files
    eater = TokenEater(options)
    for filename in args:
        if filename == '-':
            if options.verbose:
                print('Reading standard input')
            fp = sys.stdin
            closep = 0
        else:
            if options.verbose:
                print(('Working on %s') % filename)
            if filename.endswith('.html'):
                from uliweb.core.template import template_file_py
                from io import StringIO
                text = template_file_py(filename,
                                        skip_extern=True,
                                        multilines=True)
                fp = StringIO(text)
            else:
                fp = open(filename)
                closep = 1
        try:
            eater.set_filename(filename)
            try:
                tokenize.tokenize(fp.readline, eater)
            except tokenize.TokenError as e:
                print('%s: %s, line %d, column %d' %
                      (e.args[0], filename, e.args[1][0], e.args[1][1]),
                      file=sys.stderr)
        finally:
            if closep:
                fp.close()

    # write the output
    if options.outfile == '-':
        fp = sys.stdout
        closep = 0
    else:
        if options.outpath:
            options.outfile = os.path.join(options.outpath, options.outfile)
        path = os.path.dirname(options.outfile)
        if path:
            if not os.path.exists(path):
                try:
                    os.makedirs(path)
                except:
                    pass
        fp = open(options.outfile, 'w')
        closep = 1
    try:
        eater.write(fp)
    finally:
        if closep:
            fp.close()
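
For reference, here is a minimal sketch of driving the main() above from Python instead of the shell, assuming the script is importable as pygettext; the source path is hypothetical:

import sys

import pygettext  # assumed module name

# Equivalent to: pygettext.py -v -o messages.pot -p locale app/views.py
sys.argv = ['pygettext.py', '-v', '-o', 'messages.pot', '-p', 'locale',
            'app/views.py']  # hypothetical source path
pygettext.main()
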
Example #7
File: pygettext.py  Project: znanl/uliweb
    # slurp through all the files
    eater = TokenEater(options)
    for filename in args:
        if filename == '-':
            if options.verbose:
                print('Reading standard input')
            fp = sys.stdin
            closep = 0
        else:
            if options.verbose:
                print('Working on %s' % filename)
            if filename.endswith('.html'):
                from uliweb.core.template import template_file_py
                from cStringIO import StringIO
                text = template_file_py(filename, skip_extern=True)
                fp = StringIO(text)
                closep = 0
            else:
                fp = open(filename)
                closep = 1
        try:
            eater.set_filename(filename)
            try:
                tokenize.tokenize(fp.readline, eater)
            except tokenize.TokenError, e:
                print >> sys.stderr, '%s: %s, line %d, column %d' % (
                    e[0], filename, e[1][0], e[1][1])
        finally:
            if closep:
                fp.close()