def set_blocking_signal_threshold(self, seconds, action):
    if not hasattr(signal, "setitimer"):
        gen_log.error("set_blocking_signal_threshold requires a signal module "
                      "with the setitimer method")
        return
    self._blocking_signal_threshold = seconds
    if seconds is not None:
        signal.signal(signal.SIGALRM,
                      action if action is not None else signal.SIG_DFL)
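
# A standalone usage sketch (not part of the original module), assuming this is
# the Tornado 4.x ``IOLoop.set_blocking_signal_threshold`` method: log a stack
# trace whenever a single callback blocks the event loop for too long.
# ``IOLoop.log_stack`` is Tornado's stock signal-handler action for this.
from salt.ext.tornado.ioloop import IOLoop

io_loop = IOLoop.current()
# Requires signal.setitimer, i.e. a Unix platform and the main thread.
io_loop.set_blocking_signal_threshold(1, io_loop.log_stack)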
def cpu_count():
    """Returns the number of processors on this machine."""
    if multiprocessing is None:
        return 1
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        pass
    try:
        return os.sysconf("SC_NPROCESSORS_CONF")
    except (AttributeError, ValueError):
        pass
    gen_log.error("Could not detect number of processors; assuming 1")
    return 1
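
# A standalone usage sketch (not part of the original module), assuming this is
# ``tornado.process.cpu_count``: one worker per detected CPU is the typical use,
# and ``fork_processes(0)`` performs the same lookup internally.
from salt.ext.tornado import process

n_workers = process.cpu_count()
process.fork_processes(n_workers)  # fork one child per detected CPU (Unix only)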
def load_gettext_translations(directory, domain):
    """Loads translations from `gettext`'s locale tree

    Locale tree is similar to system's ``/usr/share/locale``, like::

        {directory}/{lang}/LC_MESSAGES/{domain}.mo

    Three steps are required to have your app translated:

    1. Generate POT translation file::

        xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc

    2. Merge against existing POT file::

        msgmerge old.po mydomain.po > new.po

    3. Compile::

        msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo
    """
    import gettext
    global _translations
    global _supported_locales
    global _use_gettext
    _translations = {}
    for lang in os.listdir(directory):
        if lang.startswith('.'):
            continue  # skip .svn, etc
        if os.path.isfile(os.path.join(directory, lang)):
            continue
        try:
            os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo"))
            _translations[lang] = gettext.translation(domain, directory,
                                                      languages=[lang])
        except Exception as e:
            gen_log.error("Cannot load translation for '%s': %s", lang, str(e))
            continue
    _supported_locales = frozenset(
        list(_translations.keys()) + [_default_locale])
    _use_gettext = True
    gen_log.debug("Supported locales: %s", sorted(_supported_locales))
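
# A standalone usage sketch with illustrative paths and domain name (not part
# of the original module): load the compiled .mo tree at startup, then look up
# a Locale and translate through it.
from salt.ext.tornado import locale

locale.load_gettext_translations("/srv/app/locale", "mydomain")
user_locale = locale.get("pt_BR")        # falls back to the default locale
print(user_locale.translate("Sign in"))  # looked up via gettext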
@gen.coroutine
def _server_request_loop(self, delegate):
    try:
        while True:
            conn = HTTP1Connection(self.stream, False,
                                   self.params, self.context)
            request_delegate = delegate.start_request(self, conn)
            try:
                ret = yield conn.read_response(request_delegate)
            except (iostream.StreamClosedError,
                    iostream.UnsatisfiableReadError):
                return
            except _QuietException:
                # This exception was already logged.
                conn.close()
                return
            except Exception:
                gen_log.error("Uncaught exception", exc_info=True)
                conn.close()
                return
            if not ret:
                return
            yield gen.moment
    finally:
        delegate.on_close(self)
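
# A hedged sketch (not part of the original module) of the ``delegate`` contract
# this loop drives: ``start_request`` is called once per request and must return
# an ``HTTPMessageDelegate``; ``on_close`` runs when the loop exits. The echo
# delegate below is illustrative only, built on tornado.httputil's interfaces.
from salt.ext.tornado import httputil


class EchoDelegate(httputil.HTTPServerConnectionDelegate):
    def start_request(self, server_conn, request_conn):
        return _EchoMessageDelegate(request_conn)

    def on_close(self, server_conn):
        pass  # per-connection cleanup hook


class _EchoMessageDelegate(httputil.HTTPMessageDelegate):
    def __init__(self, connection):
        self.connection = connection

    def finish(self):
        # Called once the request has been fully read; write a fixed response.
        body = b"hello\n"
        self.connection.write_headers(
            httputil.ResponseStartLine("HTTP/1.1", 200, "OK"),
            httputil.HTTPHeaders({"Content-Length": str(len(body))}),
            body)
        self.connection.finish()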
def main(**kwargs):
    """A simple test runner.

    This test runner is essentially equivalent to `unittest.main` from
    the standard library, but adds support for tornado-style option
    parsing and log formatting.

    It is *not* necessary to use this `main` function to run tests
    using `AsyncTestCase`; these tests are self-contained and can run
    with any test runner.

    The easiest way to run a test is via the command line::

        python -m tornado.testing tornado.test.stack_context_test

    See the standard library unittest module for ways in which tests can
    be specified.

    Projects with many tests may wish to define a test script like
    ``tornado/test/runtests.py``.  This script should define a method
    ``all()`` which returns a test suite and then call
    `tornado.testing.main()`.  Note that even when a test script is
    used, the ``all()`` test suite may be overridden by naming a
    single test on the command line::

        # Runs all tests
        python -m tornado.test.runtests

        # Runs one test
        python -m tornado.test.runtests tornado.test.stack_context_test

    Additional keyword arguments passed through to ``unittest.main()``.
    For example, use ``tornado.testing.main(verbosity=2)``
    to show many test details as they are run.
    See http://docs.python.org/library/unittest.html#unittest.main
    for full argument list.
    """
    from salt.ext.tornado.options import define, options, parse_command_line

    define('exception_on_interrupt', type=bool, default=True,
           help=("If true (default), ctrl-c raises a KeyboardInterrupt "
                 "exception. This prints a stack trace but cannot interrupt "
                 "certain operations. If false, the process is more reliably "
                 "killed, but does not print a stack trace."))

    # support the same options as unittest's command-line interface
    define('verbose', type=bool)
    define('quiet', type=bool)
    define('failfast', type=bool)
    define('catch', type=bool)
    define('buffer', type=bool)

    argv = [sys.argv[0]] + parse_command_line(sys.argv)

    if not options.exception_on_interrupt:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if options.verbose is not None:
        kwargs['verbosity'] = 2
    if options.quiet is not None:
        kwargs['verbosity'] = 0
    if options.failfast is not None:
        kwargs['failfast'] = True
    if options.catch is not None:
        kwargs['catchbreak'] = True
    if options.buffer is not None:
        kwargs['buffer'] = True

    if __name__ == '__main__' and len(argv) == 1:
        print("No tests specified", file=sys.stderr)
        sys.exit(1)
    try:
        # In order to be able to run tests by their fully-qualified name
        # on the command line without importing all tests here,
        # module must be set to None.  Python 3.2's unittest.main ignores
        # defaultTest if no module is given (it tries to do its own
        # test discovery, which is incompatible with auto2to3), so don't
        # set module if we're not asking for a specific test.
        if len(argv) > 1:
            unittest.main(module=None, argv=argv, **kwargs)
        else:
            unittest.main(defaultTest="all", argv=argv, **kwargs)
    except SystemExit as e:
        if e.code == 0:
            gen_log.info('PASS')
        else:
            gen_log.error('FAIL')
        raise
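
# A hedged sketch (not part of the original module) of the kind of
# ``runtests.py`` script the docstring above describes; the module names are
# placeholders for a project's own test modules.
import unittest

from salt.ext.tornado.testing import main

TEST_MODULES = [
    'myproject.test.util_test',
    'myproject.test.web_test',
]


def all():
    return unittest.defaultTestLoader.loadTestsFromNames(TEST_MODULES)


if __name__ == '__main__':
    main()  # with no test named on the command line, runs the ``all()`` suite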
def load_translations(directory, encoding=None):
    """Loads translations from CSV files in a directory.

    Translations are strings with optional Python-style named placeholders
    (e.g., ``My name is %(name)s``) and their associated translations.

    The directory should have translation files of the form ``LOCALE.csv``,
    e.g. ``es_GT.csv``. The CSV files should have two or three columns: string,
    translation, and an optional plural indicator. Plural indicators should
    be one of "plural" or "singular". A given string can have both singular
    and plural forms. For example ``%(name)s liked this`` may have a
    different verb conjugation depending on whether %(name)s is one
    name or a list of names. There should be two rows in the CSV file for
    that string, one with plural indicator "singular", and one "plural".
    For strings with no verbs that would change on translation, simply
    use "unknown" or the empty string (or don't include the column at all).

    The file is read using the `csv` module in the default "excel" dialect.
    In this format there should not be spaces after the commas.

    If no ``encoding`` parameter is given, the encoding will be
    detected automatically (among UTF-8 and UTF-16) if the file
    contains a byte-order marker (BOM), defaulting to UTF-8 if no BOM
    is present.

    Example translation ``es_LA.csv``::

        "I love you","Te amo"
        "%(name)s liked this","A %(name)s les gustó esto","plural"
        "%(name)s liked this","A %(name)s le gustó esto","singular"

    .. versionchanged:: 4.3
       Added ``encoding`` parameter. Added support for BOM-based encoding
       detection, UTF-16, and UTF-8-with-BOM.
    """
    global _translations
    global _supported_locales
    _translations = {}
    for path in os.listdir(directory):
        if not path.endswith(".csv"):
            continue
        locale, extension = path.split(".")
        if not re.match("[a-z]+(_[A-Z]+)?$", locale):
            gen_log.error("Unrecognized locale %r (path: %s)", locale,
                          os.path.join(directory, path))
            continue
        full_path = os.path.join(directory, path)
        if encoding is None:
            # Try to autodetect encoding based on the BOM.
            with open(full_path, 'rb') as f:
                data = f.read(len(codecs.BOM_UTF16_LE))
            if data in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
                encoding = 'utf-16'
            else:
                # utf-8-sig is "utf-8 with optional BOM". It's discouraged
                # in most cases but is common with CSV files because Excel
                # cannot read utf-8 files without a BOM.
                encoding = 'utf-8-sig'
        if PY3:
            # python 3: csv.reader requires a file open in text mode.
            # Force utf8 to avoid dependence on $LANG environment variable.
            f = open(full_path, "r", encoding=encoding)
        else:
            # python 2: csv can only handle byte strings (in ascii-compatible
            # encodings), which we decode below. Transcode everything into
            # utf8 before passing it to csv.reader.
            f = BytesIO()
            with codecs.open(full_path, "r", encoding=encoding) as infile:
                f.write(escape.utf8(infile.read()))
            f.seek(0)
        _translations[locale] = {}
        for i, row in enumerate(csv.reader(f)):
            if not row or len(row) < 2:
                continue
            row = [escape.to_unicode(c).strip() for c in row]
            english, translation = row[:2]
            if len(row) > 2:
                plural = row[2] or "unknown"
            else:
                plural = "unknown"
            if plural not in ("plural", "singular", "unknown"):
                gen_log.error("Unrecognized plural indicator %r in %s line %d",
                              plural, path, i + 1)
                continue
            _translations[locale].setdefault(plural, {})[english] = translation
        f.close()
    _supported_locales = frozenset(
        list(_translations.keys()) + [_default_locale])
    gen_log.debug("Supported locales: %s", sorted(_supported_locales))
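
# A standalone usage sketch with an illustrative directory path (not part of
# the original module): load the CSV tree at startup, then translate with
# plural handling.
from salt.ext.tornado import locale

locale.load_translations("/srv/app/translations")  # reads es_LA.csv, etc.
es = locale.get("es_LA")
print(es.translate("I love you"))
# Picks the "plural" or "singular" row depending on ``count``.
print(es.translate("%(name)s liked this",
                   "%(name)s liked this", count=2) % {"name": "Ana"})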