def _configure_logging(test=False, **kwargs):
    root_logger = logging.getLogger()
    global _logging_configured
    if _logging_configured:
        root_logger.info("Logging was already configured in this interpreter process. The currently "
                         "registered handlers, formatters, filters and log levels will be left as is.")
    else:
        root_logger.setLevel(logging.WARNING)
        if 'AWS_LAMBDA_LOG_GROUP_NAME' in os.environ:
            # On AWS Lambda, we assume that its runtime already configured logging appropriately
            pass
        elif len(root_logger.handlers) == 0:
            logging.basicConfig(**kwargs)
        else:
            # If this happens, the process can likely proceed but the underlying issue needs to be
            # investigated. Some module isn't playing nicely and configured logging before we had a
            # chance to do so. The backtrace included in the log message may look scary but it should
            # aid in finding the culprit.
            root_logger.warning("It appears that logging was already configured in this interpreter "
                                "process. Currently registered handlers, formatters and filters will "
                                "be left as is.",
                                stack_info=True)
        debug = Config.debug_level()
        log_levels = main_log_levels
        if test:
            log_levels = {**log_levels, **test_log_levels}
        for logger, levels in log_levels.items():
            if isinstance(logger, (str, type(None))):
                logger = logging.getLogger(logger)
            level = levels[min(debug, len(levels) - 1)]
            logger.setLevel(level)
        _logging_configured = True
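
def _example_configure_logging_usage():
    """Hypothetical usage sketch, not called by the application.

    The table below only illustrates the assumed shape of ``main_log_levels`` and
    ``test_log_levels``; the actual contents live elsewhere in the module. Each key is a logger
    name (or None for the root logger) and each value is a tuple of levels indexed by the
    DSS_DEBUG setting, most verbose last.
    """
    assumed_shape = {
        None: (logging.WARNING, logging.INFO, logging.DEBUG),  # hypothetical root logger levels
        'dss': (logging.INFO, logging.DEBUG),                  # hypothetical application logger levels
    }
    assert all(isinstance(levels, tuple) for levels in assumed_shape.values())
    # With DSS_DEBUG=1 and the tuples above, the root logger would end up at INFO and the 'dss'
    # logger at DEBUG. Keyword arguments are forwarded to logging.basicConfig() when no handlers
    # have been registered yet; the format string here is illustrative only.
    _configure_logging(format='%(asctime)s %(levelname)s %(name)s %(message)s')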
def create_app():
    app = DSSApp(__name__,
                 validator_map={
                     'body': DSSRequestBodyValidator,
                     'parameter': DSSParameterValidator,
                 })
    # The Flask/Connexion app's logger has its own multi-line formatter and configuration. Rather
    # than suppressing it we let it do its thing, give it a special name and only enable it if
    # DSS_DEBUG > 1. Most of the DSS web app's logging is done through the DSSChaliceApp.app logger,
    # not the Flask app's logger.
    #
    # app.app.logger_name = 'dss.api'
    debug = Config.debug_level() > 0
    app.app.debug = debug
    app.app.logger.info('Flask debug is %s.', 'enabled' if debug else 'disabled')
    resolver = RestyResolver("dss.api", collection_endpoint_name="list")
    app.add_api('../dss-api.yml', resolver=resolver, validate_responses=True, arguments=os.environ)
    app.add_error_handler(DSSException, dss_exception_handler)
    return app
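
# Hypothetical local-run sketch: the host and port are illustrative, and running the module
# directly with Flask's development server is an assumption about local testing, not how the
# service is deployed. ``create_app()`` returns the Connexion wrapper; its underlying Flask
# application is available as ``.app``.
if __name__ == '__main__':
    create_app().app.run(host='127.0.0.1', port=5000)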
def __init__(self, timeout: float = 60, delay: float = 10) -> None:
    elasticsearch_binary = os.getenv("DSS_TEST_ES_PATH", "elasticsearch")
    tempdir = tempfile.TemporaryDirectory()
    # Set Elasticsearch's initial and max heap to 1.6 GiB, 40% of what's available on Travis, according to
    # guidance from https://www.elastic.co/guide/en/elasticsearch/reference/current/heap-size.html
    env = dict(os.environ, ES_JAVA_OPTS="-Xms1638m -Xmx1638m")
    # Work around https://github.com/travis-ci/travis-ci/issues/8408
    if '_JAVA_OPTIONS' in env:  # no coverage
        logger.warning("_JAVA_OPTIONS is set. This may override the options just set via ES_JAVA_OPTS.")
    port = networking.unused_tcp_port()
    transport_port = networking.unused_tcp_port()
    args = [elasticsearch_binary,
            "-E", f"http.port={port}",
            "-E", f"transport.tcp.port={transport_port}",
            "-E", f"path.data={tempdir.name}",
            "-E", "logger.org.elasticsearch=" + ("info" if Config.debug_level() > 0 else "warn")]
    logger.info("Running %r with environment %r", args, env)
    proc = subprocess.Popen(args, env=env)

    def check():
        status = proc.poll()
        if status is not None:
            tempdir.cleanup()
            raise ChildProcessError(f"ES process died with status {status}")

    deadline = time.time() + timeout
    while True:
        check()
        time.sleep(delay)
        check()
        logger.info('Attempting to connect to ES instance at 127.0.0.1:%i', port)
        try:
            sock = socket.create_connection(("127.0.0.1", port), 1)
        except (ConnectionRefusedError, socket.timeout):
            logger.debug('Failed connecting to ES instance at 127.0.0.1:%i', port, exc_info=True)
            if time.time() + delay > deadline:
                proc.kill()
                tempdir.cleanup()
                raise
        else:
            sock.close()
            check()
            self.port = port
            self.proc = proc
            self.tempdir = tempdir
            break
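
# Hypothetical test usage sketch: the class name ``ElasticsearchServer`` and the teardown calls
# below are assumptions about the enclosing fixture, shown only to illustrate how this __init__ is
# typically consumed. Once construction returns, ``self.port``, ``self.proc`` and ``self.tempdir``
# are set and the server is accepting connections on 127.0.0.1.
#
#   server = ElasticsearchServer()
#   try:
#       client = elasticsearch.Elasticsearch(hosts=[{"host": "127.0.0.1", "port": server.port}])
#       # ... run tests against the client ...
#   finally:
#       server.proc.kill()
#       server.tempdir.cleanup()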