def start_infra(asynchronous=False, apis=None):
    try:
        # load plugins
        load_plugins()

        event_publisher.fire_event(event_publisher.EVENT_START_INFRA,
            {'d': in_docker() and 1 or 0, 'c': in_ci() and 1 or 0})

        # set up logging
        setup_logging()

        # prepare APIs
        apis = canonicalize_api_names(apis)

        # set environment
        os.environ['AWS_REGION'] = config.DEFAULT_REGION
        os.environ['ENV'] = ENV_DEV

        # register signal handlers
        if not os.environ.get(ENV_INTERNAL_TEST_RUN):
            register_signal_handlers()

        # make sure AWS credentials are configured, otherwise boto3 bails on us
        check_aws_credentials()

        # install libs if not present
        install.install_components(apis)

        # Some services take a bit to come up
        sleep_time = 5

        # start services
        thread = None

        if 'elasticsearch' in apis or 'es' in apis:
            sleep_time = max(sleep_time, 10)

        # loop through plugins and start each service
        for name, plugin in SERVICE_PLUGINS.items():
            if name in apis:
                t1 = plugin.start(asynchronous=True)
                thread = thread or t1

        time.sleep(sleep_time)

        # ensure that all infra components are up and running
        check_infra(apis=apis)

        # restore persisted data
        restore_persisted_data(apis=apis)

        print('Ready.')
        sys.stdout.flush()

        if not asynchronous and thread:
            # this is a bit of an ugly hack, but we need to make sure that we
            # stay in the execution context of the main thread, otherwise our
            # signal handlers don't work
            while True:
                time.sleep(1)

        return thread
    except KeyboardInterrupt:
        print('Shutdown')
    except Exception as e:
        print('Error starting infrastructure: %s %s' % (e, traceback.format_exc()))
        sys.stdout.flush()
        raise e
    finally:
        if not asynchronous:
            stop_infra()
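# Illustrative usage sketch (not part of the original source). It assumes
# start_infra() is importable from localstack.services.infra, which may differ
# between LocalStack versions; the API list is an arbitrary example.
if __name__ == '__main__':
    from localstack.services.infra import start_infra

    # blocking mode: start_infra() loops in the main thread so the registered
    # signal handlers keep working, and stop_infra() runs in the `finally`
    # block once the process is interrupted
    start_infra(asynchronous=False, apis=['s3', 'sqs'])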
def do_start_infra(asynchronous, apis, is_in_docker):
    # import to avoid cyclic dependency
    from localstack.services.edge import BOOTSTRAP_LOCK

    event_publisher.fire_event(event_publisher.EVENT_START_INFRA,
        {'d': is_in_docker and 1 or 0, 'c': in_ci() and 1 or 0})

    # set up logging
    setup_logging()

    # prepare APIs
    apis = canonicalize_api_names(apis)

    @log_duration()
    def prepare_environment():
        # set environment
        os.environ['AWS_REGION'] = config.DEFAULT_REGION
        os.environ['ENV'] = ENV_DEV

        # register signal handlers
        if not is_local_test_mode():
            register_signal_handlers()

        # make sure AWS credentials are configured, otherwise boto3 bails on us
        check_aws_credentials()

    @log_duration()
    def prepare_installation():
        # install libs if not present
        install.install_components(apis)

    @log_duration()
    def start_api_services():
        # Some services take a bit to come up
        sleep_time = 5

        # start services
        thread = None

        # loop through plugins and start each service
        for name, plugin in SERVICE_PLUGINS.items():
            if plugin.is_enabled(api_names=apis):
                record_service_health(name, 'starting')
                t1 = plugin.start(asynchronous=True)
                thread = thread or t1

        time.sleep(sleep_time)

        # ensure that all infra components are up and running
        check_infra(apis=apis)

        # restore persisted data
        persistence.restore_persisted_data(apis=apis)

        return thread

    prepare_environment()
    prepare_installation()

    with BOOTSTRAP_LOCK:
        thread = start_api_services()

    print(READY_MARKER_OUTPUT)
    sys.stdout.flush()

    return thread
def do_start_infra(asynchronous, apis, is_in_docker):
    event_publisher.fire_event(event_publisher.EVENT_START_INFRA, {
        'd': is_in_docker and 1 or 0,
        'c': in_ci() and 1 or 0
    })

    # set up logging
    setup_logging()

    # prepare APIs
    apis = canonicalize_api_names(apis)

    # set environment
    os.environ['AWS_REGION'] = config.DEFAULT_REGION
    os.environ['ENV'] = ENV_DEV

    # register signal handlers
    if not is_local_test_mode():
        register_signal_handlers()

    # make sure AWS credentials are configured, otherwise boto3 bails on us
    check_aws_credentials()

    # install libs if not present
    install.install_components(apis)

    # Some services take a bit to come up
    sleep_time = 5

    # start services
    thread = None

    # loop through plugins and start each service
    for name, plugin in SERVICE_PLUGINS.items():
        if plugin.is_enabled(api_names=apis):
            record_service_health(name, 'starting')
            t1 = plugin.start(asynchronous=True)
            thread = thread or t1

    time.sleep(sleep_time)

    # ensure that all infra components are up and running
    check_infra(apis=apis)

    # restore persisted data
    persistence.restore_persisted_data(apis=apis)

    print('Ready.')
    sys.stdout.flush()

    if not asynchronous and thread:
        # this is a bit of an ugly hack, but we need to make sure that we
        # stay in the execution context of the main thread, otherwise our
        # signal handlers don't work
        sleep_forever()

    return thread
def start_infra(asynchronous=False, apis=None):
    try:
        os.environ[LOCALSTACK_INFRA_PROCESS] = '1'

        is_in_docker = in_docker()
        # print a warning if we're not running in Docker but using Docker based LAMBDA_EXECUTOR
        if not is_in_docker and 'docker' in config.LAMBDA_EXECUTOR and not is_linux():
            print(('!WARNING! - Running outside of Docker with $LAMBDA_EXECUTOR=%s can lead to '
                   'problems on your OS. The environment variable $LOCALSTACK_HOSTNAME may not '
                   'be properly set in your Lambdas.') % config.LAMBDA_EXECUTOR)

        if is_in_docker and config.LAMBDA_REMOTE_DOCKER and not os.environ.get('HOST_TMP_FOLDER'):
            print('!WARNING! - Looks like you have configured $LAMBDA_REMOTE_DOCKER=1 - '
                  "please make sure to configure $HOST_TMP_FOLDER to point to your host's $TMPDIR")

        # apply patches
        patch_urllib3_connection_pool(maxsize=128)

        # load plugins
        load_plugins()

        event_publisher.fire_event(event_publisher.EVENT_START_INFRA, {
            'd': is_in_docker and 1 or 0,
            'c': in_ci() and 1 or 0
        })

        # set up logging
        setup_logging()

        # prepare APIs
        apis = canonicalize_api_names(apis)

        # set environment
        os.environ['AWS_REGION'] = config.DEFAULT_REGION
        os.environ['ENV'] = ENV_DEV

        # register signal handlers
        if not os.environ.get(ENV_INTERNAL_TEST_RUN):
            register_signal_handlers()

        # make sure AWS credentials are configured, otherwise boto3 bails on us
        check_aws_credentials()

        # install libs if not present
        install.install_components(apis)

        # Some services take a bit to come up
        sleep_time = 5

        # start services
        thread = None

        # loop through plugins and start each service
        for name, plugin in SERVICE_PLUGINS.items():
            if plugin.is_enabled(api_names=apis):
                record_service_health(name, 'starting')
                t1 = plugin.start(asynchronous=True)
                thread = thread or t1

        time.sleep(sleep_time)

        # ensure that all infra components are up and running
        check_infra(apis=apis)

        # restore persisted data
        persistence.restore_persisted_data(apis=apis)

        print('Ready.')
        sys.stdout.flush()

        if not asynchronous and thread:
            # this is a bit of an ugly hack, but we need to make sure that we
            # stay in the execution context of the main thread, otherwise our
            # signal handlers don't work
            while True:
                time.sleep(1)

        return thread
    except KeyboardInterrupt:
        print('Shutdown')
    except Exception as e:
        print('Error starting infrastructure: %s %s' % (e, traceback.format_exc()))
        sys.stdout.flush()
        raise e
    finally:
        if not asynchronous:
            stop_infra()
def do_start_infra(asynchronous, apis, is_in_docker):
    event_publisher.fire_event(
        event_publisher.EVENT_START_INFRA,
        {"d": is_in_docker and 1 or 0, "c": in_ci() and 1 or 0},
    )

    # set up logging
    setup_logging()

    if config.DEVELOP:
        install.install_debugpy_and_dependencies()
        import debugpy

        LOG.info("Starting debug server at: %s:%s" % (constants.BIND_HOST, config.DEVELOP_PORT))
        debugpy.listen((constants.BIND_HOST, config.DEVELOP_PORT))

        if config.WAIT_FOR_DEBUGGER:
            debugpy.wait_for_client()

    # prepare APIs
    apis = canonicalize_api_names(apis)
    analytics.log.event("infra_start", apis=apis)

    @log_duration()
    def prepare_environment():
        # set environment
        os.environ["AWS_REGION"] = config.DEFAULT_REGION
        os.environ["ENV"] = ENV_DEV

        # register signal handlers
        if not is_local_test_mode():
            register_signal_handlers()

        # make sure AWS credentials are configured, otherwise boto3 bails on us
        check_aws_credentials()

        patch_moto_request_handling()

    @log_duration()
    def prepare_installation():
        # install libs if not present
        install.install_components(apis)

    @log_duration()
    def preload_services():
        """
        Preload services if EAGER_SERVICE_LOADING is true.
        """
        # TODO: lazy loading should become the default beginning 0.13.0
        if not config.EAGER_SERVICE_LOADING:
            # listing the available service plugins will cause resolution of the entry points
            SERVICE_PLUGINS.list_available()
            return

        apis = list()
        for api in SERVICE_PLUGINS.list_available():
            try:
                SERVICE_PLUGINS.require(api)
                apis.append(api)
            except ServiceDisabled as e:
                LOG.debug("%s", e)
            except Exception:
                LOG.exception("could not load service plugin %s", api)

        if persistence.is_persistence_enabled():
            if not config.is_env_true(constants.ENV_PRO_ACTIVATED):
                LOG.warning(
                    "Persistence mechanism for community services (based on API calls record&replay) will be "
                    "deprecated in 0.13.0"
                )

            persistence.restore_persisted_data(apis)

    @log_duration()
    def start_runtime_components():
        from localstack.services.edge import start_edge
        from localstack.services.internal import LocalstackResourceHandler, get_internal_apis

        # serve internal APIs through the generic proxy
        ProxyListener.DEFAULT_LISTENERS.append(LocalstackResourceHandler(get_internal_apis()))

        # TODO: we want a composable LocalStack runtime (edge proxy, service manager, dns, ...)
        t = start_thread(start_edge, quiet=False)

        # TODO: properly encapsulate starting/stopping of edge server in a class
        if not poll_condition(
            lambda: is_port_open(config.get_edge_port_http()), timeout=5, interval=0.1
        ):
            raise TimeoutError(
                f"gave up waiting for edge server on {config.EDGE_BIND_HOST}:{config.EDGE_PORT}"
            )

        return t

    prepare_environment()
    prepare_installation()
    thread = start_runtime_components()
    preload_services()

    if config.DATA_DIR:
        persistence.save_startup_info()

    print(READY_MARKER_OUTPUT)
    sys.stdout.flush()

    INFRA_READY.set()
    analytics.log.event("infra_ready")

    return thread
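# Minimal sketch of the poll_condition()/is_port_open() readiness check used in
# start_runtime_components() above. This is an illustrative re-implementation,
# not LocalStack's actual utility code; names carry a _sketch suffix to make
# that explicit.
import socket
import time


def is_port_open_sketch(port, host="127.0.0.1"):
    # try to open a TCP connection; success means something is listening
    try:
        with socket.create_connection((host, port), timeout=1):
            return True
    except OSError:
        return False


def poll_condition_sketch(condition, timeout=5, interval=0.1):
    # re-evaluate `condition` until it returns True or the timeout elapses
    deadline = time.time() + timeout
    while time.time() < deadline:
        if condition():
            return True
        time.sleep(interval)
    return False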
        retry(check_events, retries=9, sleep=4)

        # clean up
        testutil.delete_lambda_function(lambda_ddb_name)

    def test_scheduled_lambda(self):
        def check_invocation(*args):
            log_events = LambdaTestBase.get_lambda_logs(self.scheduled_lambda_name)
            self.assertGreater(len(log_events), 0)

        # wait for up to 1 min for invocations to get triggered
        retry(check_invocation, retries=14, sleep=5)


@pytest.mark.xfail(in_ci(), reason="This test is notoriously flaky in CI environments")
def test_sqs_batch_lambda_forward(lambda_client, sqs_client, create_lambda_function):
    lambda_name_queue_batch = "lambda_queue_batch-%s" % short_uid()

    # deploy test lambda connected to SQS queue
    sqs_queue_info = testutil.create_sqs_queue(lambda_name_queue_batch)
    queue_url = sqs_queue_info["QueueUrl"]
    resp = create_lambda_function(
        handler_file=TEST_LAMBDA_PYTHON_ECHO,
        func_name=lambda_name_queue_batch,
        event_source_arn=sqs_queue_info["QueueArn"],
        libs=TEST_LAMBDA_LIBS,
    )
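# Illustrative sketch of the retry(fn, retries, sleep) pattern used in the
# tests above; the real helper lives in LocalStack's test utilities and may
# differ in signature and behavior.
import time


def retry_sketch(fn, retries=3, sleep=1.0, **kwargs):
    # call `fn` until it stops raising; re-raise the last error after the
    # final attempt
    for attempt in range(1, retries + 1):
        try:
            return fn(**kwargs)
        except Exception:
            if attempt == retries:
                raise
            time.sleep(sleep)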
def do_start_infra(asynchronous, apis, is_in_docker):
    # import to avoid cyclic dependency
    from localstack.services.edge import BOOTSTRAP_LOCK

    event_publisher.fire_event(
        event_publisher.EVENT_START_INFRA,
        {"d": is_in_docker and 1 or 0, "c": in_ci() and 1 or 0},
    )

    # set up logging
    setup_logging()

    if config.DEVELOP:
        install.install_debugpy_and_dependencies()
        import debugpy

        LOG.info("Starting debug server at: %s:%s" % (constants.BIND_HOST, config.DEVELOP_PORT))
        debugpy.listen((constants.BIND_HOST, config.DEVELOP_PORT))

        if config.WAIT_FOR_DEBUGGER:
            debugpy.wait_for_client()

    # prepare APIs
    apis = canonicalize_api_names(apis)
    analytics.log.event("infra_start", apis=apis)

    @log_duration()
    def prepare_environment():
        # set environment
        os.environ["AWS_REGION"] = config.DEFAULT_REGION
        os.environ["ENV"] = ENV_DEV

        # register signal handlers
        if not is_local_test_mode():
            register_signal_handlers()

        # make sure AWS credentials are configured, otherwise boto3 bails on us
        check_aws_credentials()

    @log_duration()
    def prepare_installation():
        # install libs if not present
        install.install_components(apis)

    @log_duration()
    def start_api_services():
        # Some services take a bit to come up
        sleep_time = 5

        # start services
        thread = None

        # loop through plugins and start each service
        for name, plugin in SERVICE_PLUGINS.items():
            if plugin.is_enabled(api_names=apis):
                record_service_health(name, "starting")
                t1 = plugin.start(asynchronous=True)
                thread = thread or t1

        time.sleep(sleep_time)

        # ensure that all infra components are up and running
        check_infra(apis=apis)

        # restore persisted data
        record_service_health(
            "features:persistence", "initializing" if config.DATA_DIR else "disabled"
        )
        persistence.restore_persisted_data(apis=apis)
        if config.DATA_DIR:
            record_service_health("features:persistence", "initialized")

        return thread

    prepare_environment()
    prepare_installation()

    with BOOTSTRAP_LOCK:
        thread = start_api_services()

    if config.DATA_DIR:
        persistence.save_startup_info()

    print(READY_MARKER_OUTPUT)
    sys.stdout.flush()

    INFRA_READY.set()
    analytics.log.event("infra_ready")

    return thread
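# Hypothetical sketch of the @log_duration() decorator pattern applied to the
# prepare_* helpers above. LocalStack ships its own implementation, so this is
# only meant to illustrate the idea of timing a wrapped function and logging
# the elapsed duration.
import functools
import logging
import time

LOG_SKETCH = logging.getLogger(__name__)


def log_duration_sketch(name=None):
    def wrapper(fn):
        @functools.wraps(fn)
        def wrapped(*args, **kwargs):
            start = time.perf_counter()
            try:
                return fn(*args, **kwargs)
            finally:
                elapsed_ms = (time.perf_counter() - start) * 1000
                LOG_SKETCH.debug("%s took %.2f ms", name or fn.__name__, elapsed_ms)

        return wrapped

    return wrapper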