def test_extract(self):
    """Run the Mako extractor over gettext.mako and check every extracted
    (msgid, msgid_plural) pair, in document order.

    Fix: the expected list mixed redundant ``u""`` prefixes (only on "foo"
    and "bar") with plain strings; in Python 3 the prefix is a no-op, so it
    is dropped for consistency.
    """
    register_extractors()
    plugin = LinguaMakoExtractor({"comment-tags": "TRANSLATOR"})
    messages = list(
        plugin(os.path.join(template_base, "gettext.mako"), MockOptions()))
    msgids = [(m.msgid, m.msgid_plural) for m in messages]
    self.assertEqual(
        msgids,
        [
            ("Page arg 1", None),
            ("Page arg 2", None),
            ("Begin", None),
            ("Hi there!", None),
            ("Hello", None),
            ("Welcome", None),
            ("Yo", None),
            ("The", None),
            ("bunny", "bunnies"),
            ("Goodbye", None),
            ("Babel", None),
            ("hella", "hellas"),
            ("The", None),
            ("bunny", "bunnies"),
            ("Goodbye, really!", None),
            ("P.S. byebye", None),
            ("Top", None),
            ("foo", None),
            ("hoho", None),
            ("bar", None),
            ("Inside a p tag", None),
            ("Later in a p tag", None),
            ("No action at a distance.", None),
        ],
    )
def test_extract(self):
    """Extract all messages from gettext.mako with the TRANSLATOR comment
    tag enabled and assert the exact (msgid, msgid_plural) sequence.

    Fix: drop the two stray ``u''`` prefixes ('foo', 'bar') — they are
    meaningless in Python 3 and were inconsistent with the rest of the list.
    """
    register_extractors()
    plugin = LinguaMakoExtractor({'comment-tags': 'TRANSLATOR'})
    messages = list(
        plugin(os.path.join(template_base, 'gettext.mako'), MockOptions()))
    msgids = [(m.msgid, m.msgid_plural) for m in messages]
    self.assertEqual(
        msgids,
        [
            ('Page arg 1', None),
            ('Page arg 2', None),
            ('Begin', None),
            ('Hi there!', None),
            ('Hello', None),
            ('Welcome', None),
            ('Yo', None),
            ('The', None),
            ('bunny', 'bunnies'),
            ('Goodbye', None),
            ('Babel', None),
            ('hella', 'hellas'),
            ('The', None),
            ('bunny', 'bunnies'),
            ('Goodbye, really!', None),
            ('P.S. byebye', None),
            ('Top', None),
            ('foo', None),
            ('hoho', None),
            ('bar', None),
            ('Inside a p tag', None),
            ('Later in a p tag', None),
            ('No action at a distance.', None)])
def main():
    """Console entry point: parse options, extract translatable strings
    from the input files, and write them to a POT file.

    Exit status: 1 when a file or extractor is missing or nothing was
    scanned; 2 when no translatable strings were found.
    """
    parser = argparse.ArgumentParser(
        description='Extract translateable strings.')
    parser.add_argument('-c', '--config', metavar='CONFIG',
                        help='Read configuration from CONFIG file')
    # Input options
    parser.add_argument('-f', '--files-from', metavar='FILE',
                        help='Get list of files to process from FILE')
    parser.add_argument('-D', '--directory', metavar='DIRECTORY',
                        action='append', default=[],
                        help='Add DIRECTORY to list of paths to check '
                             'for input files')
    parser.add_argument('file', nargs='*', help='Source file to process')
    parser.add_argument('--list-extractors', action='store_true',
                        help='List all known extraction plugins')
    # Output options
    parser.add_argument('-o', '--output', metavar='FILE',
                        default='messages.pot',
                        help='Filename for generated POT file')
    parser.add_argument('--no-location', action='store_false',
                        dest='location',
                        help='Do not include location information')
    parser.add_argument('--no-linenumbers', action='store_true',
                        dest='no_linenumbers',
                        help='Do not include line numbers in location '
                             'information')
    parser.add_argument('-n', '--add-location', action='store_true',
                        dest='location', default=True,
                        help='Include location information (default)')
    # BUG FIX: without type=int a user-supplied -w value is stored as a
    # string while the default is the int 79, so downstream width handling
    # would see inconsistent types.
    parser.add_argument('-w', '--width', metavar='NUMBER', type=int,
                        default=79, help='Output width')
    parser.add_argument('-s', '--sort-output',  # babel compatibility
                        action='store_const', const='msgid', dest='sort',
                        help='Order messages by their msgid')
    parser.add_argument('-F', '--sort-by-file', action='store_const',
                        const='location', dest='sort',
                        help='Order messages by file location')
    # Extraction configuration
    parser.add_argument('-d', '--domain', help='Domain to extract')
    parser.add_argument('-k', '--keyword', metavar='WORD', dest='keywords',
                        action='append', default=[], nargs='?',
                        help='Look for WORD as additional keyword')
    parser.add_argument('-C', '--add-comments', metavar='TAG',
                        dest='comment_tag', const=True, nargs='?',
                        help='Add comments prefixed by TAG to messages, '
                             'or all if no tag is given')
    # POT metadata
    parser.add_argument('--copyright-holder', metavar='STRING',
                        help='Specifies the copyright holder for the texts')
    parser.add_argument('--package-name', metavar='NAME', default='PACKAGE',
                        help='Package name to use in the generated POT file')
    parser.add_argument('--package-version', metavar='Version',
                        default='1.0',
                        help='Package version to use in the generated '
                             'POT file')
    parser.add_argument('--msgid-bugs-address', metavar='EMAIL',
                        help='Email address bugs should be send to')
    options = parser.parse_args()
    register_extractors()
    register_babel_plugins()

    if options.list_extractors:
        for extractor in sorted(EXTRACTORS):
            print('%-17s %s' % (extractor,
                                EXTRACTORS[extractor].__doc__ or ''))
        return

    # An explicit config file wins; otherwise fall back to the per-user
    # global configuration if it exists.
    if options.config:
        read_config(options.config)
    else:
        user_home = os.path.expanduser('~')
        global_config = os.path.join(user_home, '.config', 'lingua')
        if os.path.exists(global_config):
            read_config(global_config)

    catalog = create_catalog(options)

    scanned = 0
    for filename in no_duplicates(list_files(options)):
        real_filename = find_file(filename, options.directory)
        if real_filename is None:
            print('Can not find file %s' % filename, file=sys.stderr)
            sys.exit(1)
        extractor = get_extractor(real_filename)
        if extractor is None:
            print('No extractor available for file %s' % filename,
                  file=sys.stderr)
            sys.exit(1)

        for message in extractor(real_filename, options):
            # Merge repeated msgids into a single catalog entry.
            entry = catalog.find(message.msgid, msgctxt=message.msgctxt)
            if entry is None:
                entry = POEntry(msgctxt=message.msgctxt,
                                msgid=message.msgid)
                if message.msgid_plural:
                    entry.msgid_plural = message.msgid_plural
                    entry.msgstr_plural[0] = ''
                    entry.msgstr_plural[1] = ''
                catalog.append(entry)
            entry.update(message, add_occurrences=options.location)
        scanned += 1

    if not scanned:
        print('No files scanned, aborting', file=sys.stderr)
        sys.exit(1)
    if not catalog:
        print('No translatable strings found, aborting', file=sys.stderr)
        sys.exit(2)

    if options.sort == 'msgid':
        catalog.sort(key=attrgetter('msgid'))
    elif options.sort == 'location':
        # Order the occurrences themselves, so the output is consistent
        # (list.sort() returns None, hence the `or` to yield the list).
        catalog.sort(key=lambda m: m.occurrences.sort() or m.occurrences)

    if options.no_linenumbers:
        for entry in catalog:
            strip_linenumbers(entry)

    save_catalog(catalog, options.output)
def main():
    """Extract translatable strings from the given sources into a POT file.

    Command-line entry point. Exits non-zero when an input file cannot be
    found (1), no extractor matches a file (1), nothing was scanned (1),
    or no translatable strings turned up (2).
    """
    parser = argparse.ArgumentParser(
        description='Extract translateable strings.')
    parser.add_argument('-c', '--config', metavar='CONFIG',
                        help='Read configuration from CONFIG file')
    # Input options
    parser.add_argument('-f', '--files-from', metavar='FILE',
                        help='Get list of files to process from FILE')
    parser.add_argument('-D', '--directory', metavar='DIRECTORY',
                        action='append', default=[],
                        help='Add DIRECTORY to list of paths to check '
                             'for input files')
    parser.add_argument('file', nargs='*', help='Source file to process')
    parser.add_argument('--list-extractors', action='store_true',
                        help='List all known extraction plugins')
    # Output options
    parser.add_argument('-o', '--output', metavar='FILE',
                        default='messages.pot',
                        help='Filename for generated POT file')
    parser.add_argument('--no-location', action='store_false',
                        dest='location',
                        help='Do not include location information')
    parser.add_argument('--no-linenumbers', action='store_true',
                        dest='no_linenumbers',
                        help='Do not include line numbers in location '
                             'information')
    parser.add_argument('-n', '--add-location', action='store_true',
                        dest='location', default=True,
                        help='Include location information (default)')
    # BUG FIX: added type=int — otherwise -w from the command line is a
    # str while the default is the int 79.
    parser.add_argument('-w', '--width', metavar='NUMBER', type=int,
                        default=79, help='Output width')
    parser.add_argument('-s', '--sort-output',  # babel compatibility
                        action='store_const', const='msgid', dest='sort',
                        help='Order messages by their msgid')
    parser.add_argument('-F', '--sort-by-file', action='store_const',
                        const='location', dest='sort',
                        help='Order messages by file location')
    # Extraction configuration
    parser.add_argument('-d', '--domain', help='Domain to extract')
    parser.add_argument('-k', '--keyword', metavar='WORD', dest='keywords',
                        action='append', default=[], nargs='?',
                        help='Look for WORD as additional keyword')
    parser.add_argument('-C', '--add-comments', metavar='TAG',
                        dest='comment_tag', const=True, nargs='?',
                        help='Add comments prefixed by TAG to messages, '
                             'or all if no tag is given')
    # POT metadata
    parser.add_argument('--copyright-holder', metavar='STRING',
                        help='Specifies the copyright holder for the texts')
    parser.add_argument('--package-name', metavar='NAME', default='PACKAGE',
                        help='Package name to use in the generated POT file')
    parser.add_argument('--package-version', metavar='Version',
                        default='1.0',
                        help='Package version to use in the generated '
                             'POT file')
    parser.add_argument('--msgid-bugs-address', metavar='EMAIL',
                        help='Email address bugs should be send to')
    options = parser.parse_args()
    register_extractors()
    register_babel_plugins()

    if options.list_extractors:
        for extractor in sorted(EXTRACTORS):
            print('%-17s %s' % (extractor,
                                EXTRACTORS[extractor].__doc__ or ''))
        return

    # Configuration: -c overrides; else try ~/.config/lingua if present.
    if options.config:
        read_config(options.config)
    else:
        user_home = os.path.expanduser('~')
        global_config = os.path.join(user_home, '.config', 'lingua')
        if os.path.exists(global_config):
            read_config(global_config)

    catalog = create_catalog(options)

    scanned = 0
    for filename in no_duplicates(list_files(options)):
        real_filename = find_file(filename, options.directory)
        if real_filename is None:
            print('Can not find file %s' % filename, file=sys.stderr)
            sys.exit(1)
        extractor = get_extractor(real_filename)
        if extractor is None:
            print('No extractor available for file %s' % filename,
                  file=sys.stderr)
            sys.exit(1)

        for message in extractor(real_filename, options):
            # One catalog entry per (msgctxt, msgid); occurrences merge.
            entry = catalog.find(message.msgid, msgctxt=message.msgctxt)
            if entry is None:
                entry = POEntry(msgctxt=message.msgctxt,
                                msgid=message.msgid)
                if message.msgid_plural:
                    entry.msgid_plural = message.msgid_plural
                    entry.msgstr_plural[0] = ''
                    entry.msgstr_plural[1] = ''
                catalog.append(entry)
            entry.update(message, add_occurrences=options.location)
        scanned += 1

    if not scanned:
        print('No files scanned, aborting', file=sys.stderr)
        sys.exit(1)
    if not catalog:
        print('No translatable strings found, aborting', file=sys.stderr)
        sys.exit(2)

    if options.sort == 'msgid':
        catalog.sort(key=attrgetter('msgid'))
    elif options.sort == 'location':
        # Order the occurrences themselves, so the output is consistent
        # (in-place sort returns None; `or` then yields the sorted list).
        catalog.sort(key=lambda m: m.occurrences.sort() or m.occurrences)

    if options.no_linenumbers:
        for entry in catalog:
            strip_linenumbers(entry)

    save_catalog(catalog, options.output)
def register_lingua_extractors(self):
    """Delegate to lingua's own plugin registration.

    Thin pass-through: imports lazily so lingua is only pulled in when
    this hook actually runs.
    """
    from lingua.extractors import register_extractors as _do_register

    _do_register()
def localepot(request: pyramid.request.Request) -> pyramid.response.Response:
    """Get the pot from an HTTP request.

    Scans the project's static sources and config files, runs lingua's
    extraction over them, and returns the resulting POT document as a
    ``text/x-gettext-translation`` response.

    :raises HTTPInternalServerError: when an input file cannot be found
        or no extractor matches it.
    """
    # Build the list of files to be processed
    sources = []
    sources += glob.glob(
        f"/app/{request.registry.package_name}/static-ngeo/js/apps/*.html.ejs")
    sources += glob.glob(
        f"/app/{request.registry.package_name}/static-ngeo/js/**/*.js",
        recursive=True)
    sources += glob.glob(
        f"/app/{request.registry.package_name}/static-ngeo/js/**/*.html",
        recursive=True)
    sources += glob.glob("/usr/local/tomcat/webapps/ROOT/**/config.yaml",
                         recursive=True)
    sources += ["/etc/geomapfish/config.yaml", "/app/development.ini"]

    # The following code is a modified version of the main function of this
    # file: https://github.com/wichert/lingua/blob/master/src/lingua/extract.py
    global _INITIALIZED
    if not _INITIALIZED:
        register_extractors()
        register_babel_plugins()
        with open("/app/lingua-client.cfg", encoding="utf-8") as config_file:
            read_config(config_file)
        # FIX: _INITIALIZED was assigned True twice; a single assignment
        # after the whole setup succeeded is sufficient (and avoids marking
        # the process initialized when read_config fails).
        _INITIALIZED = True

    catalog = create_catalog(
        width=110,
        copyright_holder="",
        package_name="GeoMapFish-project",
        package_version="1.0",
        msgid_bugs_address=None,
    )

    for filename in no_duplicates(list_files(None, sources)):
        real_filename = find_file(filename)
        if real_filename is None:
            LOG.error("Can not find file %s", filename)
            # BUG FIX: these f-strings had no placeholder and always
            # rendered the literal "(unknown)"; report the actual filename,
            # matching the log message above.
            raise HTTPInternalServerError(f"Can not find file {filename}")
        extractor = get_extractor(real_filename)
        if extractor is None:
            LOG.error("No extractor available for file %s", filename)
            raise HTTPInternalServerError(
                f"No extractor available for file {filename}")
        extractor_options = ExtractorOptions(
            comment_tag=True,
            domain=None,
            keywords=None,
        )
        for message in extractor(real_filename, extractor_options):
            # Merge repeated msgids into one entry.
            entry = catalog.find(message.msgid, msgctxt=message.msgctxt)
            if entry is None:
                entry = POEntry(msgctxt=message.msgctxt, msgid=message.msgid)
                catalog.append(entry)
            entry.update(message)

    # Deliberately left disabled: line numbers are kept in the output.
    # for entry in catalog:
    #     strip_linenumbers(entry)

    # Build the response
    request.response.text = catalog.__unicode__()
    set_common_headers(request, "api", Cache.PUBLIC,
                       content_type="text/x-gettext-translation")
    return request.response