def install_elasticsearch():
    """Download and install a local Elasticsearch distribution into INSTALL_DIR_ES.

    No-op if the install directory already exists. Also installs the plugins
    listed in ELASTICSEARCH_PLUGIN_LIST into the unpacked distribution.
    """
    if not os.path.exists(INSTALL_DIR_ES):
        log_install_msg('Elasticsearch')
        mkdir(INSTALL_DIR_INFRA)
        # download and extract archive
        tmp_archive = os.path.join(tempfile.gettempdir(), 'localstack.es.zip')
        download_and_extract_with_retry(ELASTICSEARCH_JAR_URL, tmp_archive, INSTALL_DIR_INFRA)
        # the archive unpacks into a version-suffixed folder (elasticsearch*) ->
        # locate it and move it to the canonical install location
        elasticsearch_dir = glob.glob(os.path.join(INSTALL_DIR_INFRA, 'elasticsearch*'))
        if not elasticsearch_dir:
            raise Exception('Unable to find Elasticsearch folder in %s' % INSTALL_DIR_INFRA)
        shutil.move(elasticsearch_dir[0], INSTALL_DIR_ES)
        # pre-create runtime directories and make them world-writable
        # (presumably so the ES process can run under a different user — see su usage elsewhere)
        for dir_name in ('data', 'logs', 'modules', 'plugins', 'config/scripts'):
            dir_path = '%s/%s' % (INSTALL_DIR_ES, dir_name)
            mkdir(dir_path)
            chmod_r(dir_path, 0o777)
        # install default plugins
        for plugin in ELASTICSEARCH_PLUGIN_LIST:
            if is_alpine():
                # https://github.com/pires/docker-elasticsearch/issues/56
                os.environ['ES_TMPDIR'] = '/tmp'
            plugin_binary = os.path.join(INSTALL_DIR_ES, 'bin', 'elasticsearch-plugin')
            run('%s install %s' % (plugin_binary, plugin))
def generate_processor_script(events_file, log_file=None):
    """Generate a standalone KCL processor script that forwards Kinesis records
    to a local UNIX domain socket.

    :param events_file: path of the UNIX socket the generated script connects to
    :param log_file: optional log file path interpolated into the script (or None)
    :return: path of the generated, executable processor script

    NOTE: the template below is runtime content — `%%` escapes become `%` in the
    generated script, and the internal indentation is part of the emitted Python.
    """
    script_file = os.path.join(tempfile.gettempdir(), 'kclipy.%s.processor.py' % short_uid())
    if log_file:
        log_file = "'%s'" % log_file
    else:
        log_file = 'None'
    content = """#!/usr/bin/env python
import os, sys, glob, json, socket, time, logging, tempfile
import subprocess32 as subprocess
logging.basicConfig(level=logging.INFO)
for path in glob.glob('%s/lib/python*/site-packages'):
    sys.path.insert(0, path)
sys.path.insert(0, '%s')
from localstack.config import DEFAULT_ENCODING
from localstack.utils.kinesis import kinesis_connector
from localstack.utils.common import timestamp
events_file = '%s'
log_file = %s
error_log = os.path.join(tempfile.gettempdir(), 'kclipy.error.log')
if __name__ == '__main__':
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    num_tries = 3
    sleep_time = 2
    error = None
    for i in range(0, num_tries):
        try:
            sock.connect(events_file)
            error = None
            break
        except Exception as e:
            error = e
            if i < num_tries:
                msg = '%%s: Unable to connect to UNIX socket. Retrying.' %% timestamp()
                subprocess.check_output('echo "%%s" >> %%s' %% (msg, error_log), shell=True)
                time.sleep(sleep_time)
    if error:
        print("WARN: Unable to connect to UNIX socket after retrying: %%s" %% error)
        raise error

    def receive_msg(records, checkpointer, shard_id):
        try:
            # records is a list of amazon_kclpy.messages.Record objects -> convert to JSON
            records_dicts = [j._json_dict for j in records]
            message_to_send = {'shard_id': shard_id, 'records': records_dicts}
            string_to_send = '%%s\\n' %% json.dumps(message_to_send)
            bytes_to_send = string_to_send.encode(DEFAULT_ENCODING)
            sock.send(bytes_to_send)
        except Exception as e:
            msg = "WARN: Unable to forward event: %%s" %% e
            print(msg)
            subprocess.check_output('echo "%%s" >> %%s' %% (msg, error_log), shell=True)
    kinesis_connector.KinesisProcessor.run_processor(log_file=log_file, processor_func=receive_msg)
""" % (LOCALSTACK_VENV_FOLDER, LOCALSTACK_ROOT_FOLDER, events_file, log_file)
    save_file(script_file, content)
    # script must be executable for the KCL daemon to invoke it
    chmod_r(script_file, 0o755)
    TMP_FILES.append(script_file)
    return script_file
def install_elasticsearch():
    """Install a local Elasticsearch server into INSTALL_DIR_ES (no-op if present)."""
    if not os.path.exists(INSTALL_DIR_ES):
        LOGGER.info('Downloading and installing local Elasticsearch server. This may take some time.')
        mkdir(INSTALL_DIR_INFRA)
        # download and extract archive
        download_and_extract_with_retry(ELASTICSEARCH_JAR_URL, TMP_ARCHIVE_ES, INSTALL_DIR_INFRA)
        # the archive unpacks into a version-suffixed folder -> move to the canonical path
        elasticsearch_dir = glob.glob(os.path.join(INSTALL_DIR_INFRA, 'elasticsearch*'))
        if not elasticsearch_dir:
            raise Exception('Unable to find Elasticsearch folder in %s' % INSTALL_DIR_INFRA)
        shutil.move(elasticsearch_dir[0], INSTALL_DIR_ES)
        # pre-create runtime directories with permissive access rights
        for dir_name in ('data', 'logs', 'modules', 'plugins', 'config/scripts'):
            dir_path = '%s/%s' % (INSTALL_DIR_ES, dir_name)
            mkdir(dir_path)
            chmod_r(dir_path, 0o777)
        # install default plugins
        for plugin in ELASTICSEARCH_PLUGIN_LIST:
            if is_alpine():
                # https://github.com/pires/docker-elasticsearch/issues/56
                os.environ['ES_TMPDIR'] = '/tmp'
            plugin_binary = os.path.join(INSTALL_DIR_ES, 'bin', 'elasticsearch-plugin')
            run('%s install %s' % (plugin_binary, plugin))
def install_elasticsearch():
    """Install a local Elasticsearch server into INSTALL_DIR_ES (no-op if present).

    Besides unpacking the distribution and installing plugins, this variant also
    comments out hardcoded -Xms/-Xmx heap settings in config/jvm.options so heap
    size can be controlled externally (e.g. via ES_JAVA_OPTS).
    """
    if not os.path.exists(INSTALL_DIR_ES):
        log_install_msg('Elasticsearch')
        mkdir(INSTALL_DIR_INFRA)
        # download and extract archive
        tmp_archive = os.path.join(tempfile.gettempdir(), 'localstack.es.zip')
        download_and_extract_with_retry(ELASTICSEARCH_JAR_URL, tmp_archive, INSTALL_DIR_INFRA)
        # the archive unpacks into a version-suffixed folder -> move to the canonical path
        elasticsearch_dir = glob.glob(os.path.join(INSTALL_DIR_INFRA, 'elasticsearch*'))
        if not elasticsearch_dir:
            raise Exception('Unable to find Elasticsearch folder in %s' % INSTALL_DIR_INFRA)
        shutil.move(elasticsearch_dir[0], INSTALL_DIR_ES)
        # pre-create runtime directories with permissive access rights
        for dir_name in ('data', 'logs', 'modules', 'plugins', 'config/scripts'):
            dir_path = '%s/%s' % (INSTALL_DIR_ES, dir_name)
            mkdir(dir_path)
            chmod_r(dir_path, 0o777)
        # install default plugins
        for plugin in ELASTICSEARCH_PLUGIN_LIST:
            if is_alpine():
                # https://github.com/pires/docker-elasticsearch/issues/56
                os.environ['ES_TMPDIR'] = '/tmp'
            plugin_binary = os.path.join(INSTALL_DIR_ES, 'bin', 'elasticsearch-plugin')
            run('%s install %s' % (plugin_binary, plugin))
        # patch JVM options file - replace hardcoded heap size settings
        jvm_options_file = os.path.join(INSTALL_DIR_ES, 'config', 'jvm.options')
        if os.path.exists(jvm_options_file):
            jvm_options = load_file(jvm_options_file)
            # comment out lines like '-Xms1g' / '-Xmx1g' (multiline match)
            jvm_options_replaced = re.sub(r'(^-Xm[sx][a-zA-Z0-9\.]+$)', r'# \1', jvm_options, flags=re.MULTILINE)
            if jvm_options != jvm_options_replaced:
                save_file(jvm_options_file, jvm_options_replaced)
def start_elasticsearch(port=None, delete_data=True, asynchronous=False, update_listener=None): port = port or config.PORT_ELASTICSEARCH # delete Elasticsearch data that may be cached locally from a previous test run delete_all_elasticsearch_data() install.install_elasticsearch() backend_port = DEFAULT_PORT_ELASTICSEARCH_BACKEND es_data_dir = '%s/infra/elasticsearch/data' % (ROOT_PATH) es_tmp_dir = '%s/infra/elasticsearch/tmp' % (ROOT_PATH) if config.DATA_DIR: es_data_dir = '%s/elasticsearch' % config.DATA_DIR # Elasticsearch 5.x cannot be bound to 0.0.0.0 in some Docker environments, # hence we use the default bind address 127.0.0.0 and put a proxy in front of it cmd = (('%s/infra/elasticsearch/bin/elasticsearch ' + '-E http.port=%s -E http.publish_port=%s -E http.compression=false -E path.data=%s') % (ROOT_PATH, backend_port, backend_port, es_data_dir)) env_vars = { 'ES_JAVA_OPTS': os.environ.get('ES_JAVA_OPTS', '-Xms200m -Xmx600m'), 'ES_TMPDIR': es_tmp_dir } print('Starting local Elasticsearch (%s port %s)...' % (get_service_protocol(), port)) if delete_data: run('rm -rf %s' % es_data_dir) # fix permissions chmod_r('%s/infra/elasticsearch' % ROOT_PATH, 0o777) mkdir(es_data_dir) chmod_r(es_data_dir, 0o777) # start proxy and ES process start_proxy_for_service('elasticsearch', port, backend_port, update_listener, quiet=True, params={'protocol_version': 'HTTP/1.0'}) if is_root(): cmd = "su -c '%s' localstack" % cmd thread = do_run(cmd, asynchronous, env_vars=env_vars) return thread
def start_kinesis_mock(port=None, asynchronous=False, update_listener=None):
    """Install (if needed) and start kinesis-mock behind the edge proxy.

    Builds an env-var-prefixed shell command (native binary or JAR fallback)
    and delegates process + proxy startup to _run_proxy_and_command.
    """
    kinesis_mock_bin = install.install_kinesis_mock()
    backend_port = get_free_tcp_port()
    # remember the chosen backend port for other components
    global PORT_KINESIS_BACKEND
    PORT_KINESIS_BACKEND = backend_port
    kinesis_data_dir_param = ""
    if config.DATA_DIR:
        kinesis_data_dir = "%s/kinesis" % config.DATA_DIR
        mkdir(kinesis_data_dir)
        kinesis_data_dir_param = "SHOULD_PERSIST_DATA=true PERSIST_PATH=%s" % kinesis_data_dir
    # map LocalStack log config onto kinesis-mock's LOG_LEVEL values
    if not config.LS_LOG:
        log_level = "INFO"
    elif config.LS_LOG == "warning":
        log_level = "WARN"
    else:
        log_level = config.LS_LOG.upper()
    log_level_param = "LOG_LEVEL=%s" % log_level
    # apply the configured artificial latency to all stream operations
    latency = config.KINESIS_LATENCY + "ms"
    latency_param = (
        "CREATE_STREAM_DURATION={l} DELETE_STREAM_DURATION={l} REGISTER_STREAM_CONSUMER_DURATION={l} "
        "START_STREAM_ENCRYPTION_DURATION={l} STOP_STREAM_ENCRYPTION_DURATION={l} "
        "DEREGISTER_STREAM_CONSUMER_DURATION={l} MERGE_SHARDS_DURATION={l} SPLIT_SHARD_DURATION={l} "
        "UPDATE_SHARD_COUNT_DURATION={l}").format(l=latency)
    if config.KINESIS_INITIALIZE_STREAMS != "":
        initialize_streams_param = "INITIALIZE_STREAMS=%s" % config.KINESIS_INITIALIZE_STREAMS
    else:
        initialize_streams_param = ""
    if kinesis_mock_bin.endswith(".jar"):
        # JAR fallback: run via java with G1 garbage collector
        cmd = "KINESIS_MOCK_PLAIN_PORT=%s SHARD_LIMIT=%s %s %s %s %s java -XX:+UseG1GC -jar %s" % (
            backend_port,
            config.KINESIS_SHARD_LIMIT,
            latency_param,
            kinesis_data_dir_param,
            log_level_param,
            initialize_streams_param,
            kinesis_mock_bin,
        )
    else:
        # native binary: make sure it is executable
        chmod_r(kinesis_mock_bin, 0o777)
        cmd = "KINESIS_MOCK_PLAIN_PORT=%s SHARD_LIMIT=%s %s %s %s %s %s --gc=G1" % (
            backend_port,
            config.KINESIS_SHARD_LIMIT,
            latency_param,
            kinesis_data_dir_param,
            log_level_param,
            initialize_streams_param,
            kinesis_mock_bin,
        )
    return _run_proxy_and_command(
        cmd=cmd,
        port=port,
        backend_port=backend_port,
        update_listener=update_listener,
        asynchronous=asynchronous,
    )
def create_lambda_archive(
    script: str,
    get_content: bool = False,
    libs: List[str] = None,
    runtime: str = None,
    file_name: str = None,
    exclude_func: Callable[[str], bool] = None,
):
    """Utility method to create a Lambda function archive.

    :param script: handler source code written into the archive
    :param get_content: if True, return the zip content; otherwise the zip file path
    :param libs: optional list of library/module names to bundle into the archive
    :param runtime: Lambda runtime identifier (defaults to LAMBDA_DEFAULT_RUNTIME)
    :param file_name: handler file name inside the archive (derived from runtime if omitted)
    :param exclude_func: predicate over archive-relative paths; matching entries are removed
    """
    if libs is None:
        libs = []
    runtime = runtime or LAMBDA_DEFAULT_RUNTIME
    with tempfile.TemporaryDirectory(prefix=ARCHIVE_DIR_PREFIX) as tmp_dir:
        file_name = file_name or get_handler_file_from_name(LAMBDA_DEFAULT_HANDLER, runtime=runtime)
        script_file = os.path.join(tmp_dir, file_name)
        if os.path.sep in script_file:
            mkdir(os.path.dirname(script_file))
            # create __init__.py files along the path to allow Python imports
            path = file_name.split(os.path.sep)
            for i in range(1, len(path)):
                save_file(os.path.join(tmp_dir, *(path[:i] + ["__init__.py"])), "")
        save_file(script_file, script)
        chmod_r(script_file, 0o777)
        # copy libs
        for lib in libs:
            # candidate locations: a package dir, a single module file, or the
            # resolved module file if the lib is importable here
            paths = [lib, "%s.py" % lib]
            try:
                module = importlib.import_module(lib)
                paths.append(module.__file__)
            except Exception:
                pass
            target_dir = tmp_dir
            root_folder = os.path.join(LOCALSTACK_VENV_FOLDER, "lib/python*/site-packages")
            if lib == "localstack":
                # special-case: bundle a trimmed copy of the localstack package itself
                paths = ["localstack/*.py", "localstack/utils"]
                root_folder = LOCALSTACK_ROOT_FOLDER
                target_dir = os.path.join(tmp_dir, lib)
                mkdir(target_dir)
            for path in paths:
                file_to_copy = path if path.startswith("/") else os.path.join(root_folder, path)
                for file_path in glob.glob(file_to_copy):
                    name = os.path.join(target_dir, file_path.split(os.path.sep)[-1])
                    if os.path.isdir(file_path):
                        copy_dir(file_path, name)
                    else:
                        shutil.copyfile(file_path, name)
        if exclude_func:
            # prune any entries the caller wants excluded from the archive
            for dirpath, folders, files in os.walk(tmp_dir):
                for name in list(folders) + list(files):
                    full_name = os.path.join(dirpath, name)
                    relative = os.path.relpath(full_name, start=tmp_dir)
                    if exclude_func(relative):
                        rm_rf(full_name)
        # create zip file
        result = create_zip_file(tmp_dir, get_content=get_content)
        return result
def install_local_kms():
    """Download the local KMS binary for the current architecture, if missing."""
    binary_path = INSTALL_PATH_KMS_BINARY_PATTERN.replace('<arch>', get_arch())
    if os.path.exists(binary_path):
        return
    log_install_msg('KMS')
    mkdir(INSTALL_DIR_KMS)
    # fetch the architecture-specific build and make it executable
    kms_url = KMS_URL_PATTERN.replace('<arch>', get_arch())
    download(kms_url, binary_path)
    chmod_r(binary_path, 0o777)
def install_local_kms():
    """Install the local KMS binary matching the current OS/architecture, if absent."""
    local_arch = f"{platform.system().lower()}-{get_arch()}"
    binary_path = INSTALL_PATH_KMS_BINARY_PATTERN.replace("<arch>", local_arch)
    if os.path.exists(binary_path):
        return
    log_install_msg("KMS")
    mkdir(INSTALL_DIR_KMS)
    # resolve the platform-specific download URL and fetch the binary
    download(KMS_URL_PATTERN.replace("<arch>", local_arch), binary_path)
    chmod_r(binary_path, 0o777)
def start_kinesis_mock(port=None, asynchronous=False, update_listener=None):
    """Download (if needed) and start kinesis-mock behind the edge proxy.

    :param port: public port to expose (defaults to config.PORT_KINESIS)
    :param asynchronous: forwarded to do_run (run process in the background)
    :param update_listener: proxy listener forwarded to start_proxy_for_service
    :return: the handle returned by do_run for the kinesis-mock process
    """
    target_dir = os.path.join(INSTALL_DIR_INFRA, 'kinesis-mock')
    machine = platform.machine().lower()
    system = platform.system().lower()
    # pick a native binary where one is published; fall back to the JAR otherwise
    if machine == 'x86_64' or machine == 'amd64':
        if system == 'windows':
            target_file_name = 'kinesis-mock-mostly-static.exe'
        elif system == 'linux':
            target_file_name = 'kinesis-mock-linux-amd64-static'
        elif system == 'darwin':
            target_file_name = 'kinesis-mock-macos-amd64-dynamic'
        else:
            target_file_name = 'kinesis-mock.jar'
    else:
        target_file_name = 'kinesis-mock.jar'
    target_file = os.path.join(target_dir, target_file_name)
    if not os.path.exists(target_file):
        # fetch the matching asset from the GitHub release metadata
        response = requests.get(KINESIS_MOCK_RELEASES)
        content = json.loads(to_str(response.content))
        assets = content.get('assets', [])
        filtered = [x for x in assets if x['name'] == target_file_name]
        archive_url = filtered[0].get('browser_download_url')
        download(archive_url, target_file)
    port = port or config.PORT_KINESIS
    backend_port = get_free_tcp_port()
    kinesis_data_dir_param = ''
    if config.DATA_DIR:
        kinesis_data_dir = '%s/kinesis' % config.DATA_DIR
        mkdir(kinesis_data_dir)
        kinesis_data_dir_param = 'SHOULD_PERSIST_DATA=true PERSIST_PATH=%s' % kinesis_data_dir
    # map LocalStack log config onto kinesis-mock's LOG_LEVEL values
    if not config.LS_LOG:
        log_level = 'INFO'
    elif config.LS_LOG == 'warning':
        log_level = 'WARN'
    else:
        # bugfix: the method was previously referenced without calling it
        # (config.LS_LOG.upper), which injected a bound-method repr into the command
        log_level = config.LS_LOG.upper()
    log_level_param = 'LOG_LEVEL=%s' % (log_level)
    latency = config.KINESIS_LATENCY + 'ms'
    latency_param = 'CREATE_STREAM_DURATION=%s DELETE_STREAM_DURATION=%s REGISTER_STREAM_CONSUMER_DURATION=%s ' \
        'START_STREAM_ENCRYPTION_DURATION=%s STOP_STREAM_ENCRYPTION_DURATION=%s ' \
        'DEREGISTER_STREAM_CONSUMER_DURATION=%s MERGE_SHARDS_DURATION=%s SPLIT_SHARD_DURATION=%s ' \
        'UPDATE_SHARD_COUNT_DURATION=%s' \
        % (latency, latency, latency, latency, latency, latency, latency, latency, latency)
    if target_file_name.endswith('.jar'):
        cmd = 'KINESIS_MOCK_HTTP1_PLAIN_PORT=%s SHARD_LIMIT=%s %s %s %s java -XX:+UseG1GC -jar %s' \
            % (backend_port, config.KINESIS_SHARD_LIMIT, latency_param,
               kinesis_data_dir_param, log_level_param, target_file)
    else:
        # native binary: make sure it is executable
        chmod_r(target_file, 0o777)
        cmd = 'KINESIS_MOCK_HTTP1_PLAIN_PORT=%s SHARD_LIMIT=%s %s %s %s %s --gc=G1' \
            % (backend_port, config.KINESIS_SHARD_LIMIT, latency_param,
               kinesis_data_dir_param, log_level_param, target_file)
    start_proxy_for_service('kinesis', port, backend_port, update_listener)
    return do_run(cmd, asynchronous)
def install_local_kms():
    """Fetch the OS-specific local-KMS binary if it is not already installed."""
    os_name = get_os()
    binary_path = INSTALL_PATH_KMS_BINARY_PATTERN.replace("<arch>", os_name)
    if os.path.exists(binary_path):
        return
    log_install_msg("KMS")
    mkdir(INSTALL_DIR_KMS)
    # TODO ARM download platform specific binary
    kms_url = KMS_URL_PATTERN.replace("<arch>", os_name)
    download(kms_url, binary_path)
    chmod_r(binary_path, 0o777)
def install_kinesis_mock():
    """Ensure a kinesis-mock binary is available locally and return its path.

    Picks a platform-specific binary where one is published for the current
    OS/architecture, otherwise falls back to the JAR; downloads the matching
    asset from the GitHub release if not installed yet.
    """
    target_dir = INSTALL_PATH_KINESIS_MOCK

    machine = platform.machine().lower()
    system = platform.system().lower()
    version = platform.version().lower()

    # sniff the kernel version string for an ARM hint — presumably to detect
    # Apple Silicon even when platform.machine() reports x86_64 (TODO confirm)
    is_probably_m1 = system == "darwin" and ("arm64" in version or "arm32" in version)

    LOG.debug("getting kinesis-mock for %s %s", system, machine)
    if is_env_true("KINESIS_MOCK_FORCE_JAVA"):
        # sometimes the static binaries may have problems, and we want to fall back to Java
        bin_file = "kinesis-mock.jar"
    elif (machine == "x86_64" or machine == "amd64") and not is_probably_m1:
        if system == "windows":
            bin_file = "kinesis-mock-mostly-static.exe"
        elif system == "linux":
            bin_file = "kinesis-mock-linux-amd64-static"
        elif system == "darwin":
            bin_file = "kinesis-mock-macos-amd64-dynamic"
        else:
            bin_file = "kinesis-mock.jar"
    else:
        bin_file = "kinesis-mock.jar"

    bin_file_path = os.path.join(target_dir, bin_file)
    if os.path.exists(bin_file_path):
        LOG.debug("kinesis-mock found at %s", bin_file_path)
        return bin_file_path

    response = requests.get(KINESIS_MOCK_RELEASE_URL)
    if not response.ok:
        raise ValueError(
            "Could not get list of releases from %s: %s" % (KINESIS_MOCK_RELEASE_URL, response.text))

    github_release = response.json()
    download_url = None
    for asset in github_release.get("assets", []):
        # find the correct binary in the release
        if asset["name"] == bin_file:
            download_url = asset["browser_download_url"]
            break

    if download_url is None:
        raise ValueError(
            "could not find required binary %s in release %s" % (bin_file, KINESIS_MOCK_RELEASE_URL))

    mkdir(target_dir)
    LOG.info("downloading kinesis-mock binary from %s", download_url)
    download(download_url, bin_file_path)
    chmod_r(bin_file_path, 0o777)
    return bin_file_path
def install_elasticsearch():
    """Install the bundled Elasticsearch distribution unless it already exists."""
    if os.path.exists(INSTALL_DIR_ES):
        return
    LOGGER.info('Downloading and installing local Elasticsearch server. This may take some time.')
    mkdir(INSTALL_DIR_INFRA)
    # fetch the archive and unpack it into the infra folder
    download_and_extract_with_retry(ELASTICSEARCH_JAR_URL, TMP_ARCHIVE_ES, INSTALL_DIR_INFRA)
    # normalize the version-suffixed folder name to plain 'elasticsearch'
    run('cd %s && mv elasticsearch* elasticsearch' % (INSTALL_DIR_INFRA))
    for sub_dir in ('data', 'logs', 'modules', 'plugins', 'config/scripts'):
        sub_path = '%s/%s' % (INSTALL_DIR_ES, sub_dir)
        mkdir(sub_path)
        chmod_r(sub_path, 0o777)
def install_elasticsearch(version=None):
    """Install the requested Elasticsearch version into its versioned install dir.

    No-op if the version's `bin/elasticsearch` executable already exists.
    Also installs default plugins, removes unneeded modules, and relaxes the
    hardcoded JVM heap settings.

    :param version: ES version to install (resolved via get_elasticsearch_install_version)
    """
    version = get_elasticsearch_install_version(version)
    install_dir = get_elasticsearch_install_dir(version)
    installed_executable = os.path.join(install_dir, 'bin', 'elasticsearch')
    if not os.path.exists(installed_executable):
        log_install_msg('Elasticsearch (%s)' % version)
        es_url = ELASTICSEARCH_URLS.get(version)
        if not es_url:
            raise Exception('Unable to find download URL for Elasticsearch version "%s"' % version)
        install_dir_parent = os.path.dirname(install_dir)
        mkdir(install_dir_parent)
        # download and extract archive
        tmp_archive = os.path.join(config.TMP_FOLDER, 'localstack.%s' % os.path.basename(es_url))
        download_and_extract_with_retry(es_url, tmp_archive, install_dir_parent)
        # the archive unpacks into a version-suffixed folder -> move to the install dir
        elasticsearch_dir = glob.glob(os.path.join(install_dir_parent, 'elasticsearch*'))
        if not elasticsearch_dir:
            raise Exception('Unable to find Elasticsearch folder in %s' % install_dir_parent)
        shutil.move(elasticsearch_dir[0], install_dir)
        # pre-create runtime directories with permissive access rights
        for dir_name in ('data', 'logs', 'modules', 'plugins', 'config/scripts'):
            dir_path = os.path.join(install_dir, dir_name)
            mkdir(dir_path)
            chmod_r(dir_path, 0o777)
        # install default plugins
        for plugin in ELASTICSEARCH_PLUGIN_LIST:
            if is_alpine():
                # https://github.com/pires/docker-elasticsearch/issues/56
                os.environ['ES_TMPDIR'] = '/tmp'
            plugin_binary = os.path.join(install_dir, 'bin', 'elasticsearch-plugin')
            plugin_dir = os.path.join(install_dir, 'plugins', plugin)
            if not os.path.exists(plugin_dir):
                LOG.info('Installing Elasticsearch plugin %s' % (plugin))
                # '-b' (batch) auto-confirms any permission prompts
                run('%s install -b %s' % (plugin_binary, plugin))
        # delete some plugins to free up space
        for plugin in ELASTICSEARCH_DELETE_MODULES:
            module_dir = os.path.join(install_dir, 'modules', plugin)
            rm_rf(module_dir)
        # disable x-pack-ml plugin (not working on Alpine)
        xpack_dir = os.path.join(install_dir, 'modules', 'x-pack-ml', 'platform')
        rm_rf(xpack_dir)
        # patch JVM options file - replace hardcoded heap size settings
        jvm_options_file = os.path.join(install_dir, 'config', 'jvm.options')
        if os.path.exists(jvm_options_file):
            jvm_options = load_file(jvm_options_file)
            # comment out lines like '-Xms1g' / '-Xmx1g' (multiline match)
            jvm_options_replaced = re.sub(r'(^-Xm[sx][a-zA-Z0-9\.]+$)', r'# \1', jvm_options, flags=re.MULTILINE)
            if jvm_options != jvm_options_replaced:
                save_file(jvm_options_file, jvm_options_replaced)
def install_kinesis_mock():
    """Ensure a kinesis-mock binary is available locally and return its path.

    Prefers a platform-specific binary when one is published for the current
    OS/architecture, otherwise falls back to the JAR; downloads the matching
    asset from the GitHub release if not installed yet.
    """
    target_dir = INSTALL_PATH_KINESIS_MOCK

    machine = platform.machine().lower()
    system = platform.system().lower()
    version = platform.version().lower()

    # sniff the kernel version string for an ARM hint — presumably to detect
    # Apple Silicon even when platform.machine() reports x86_64 (TODO confirm)
    is_probably_m1 = system == 'darwin' and ('arm64' in version or 'arm32' in version)

    LOG.debug('getting kinesis-mock for %s %s', system, machine)

    if ((machine == 'x86_64' or machine == 'amd64') and not is_probably_m1):
        if system == 'windows':
            bin_file = 'kinesis-mock-mostly-static.exe'
        elif system == 'linux':
            bin_file = 'kinesis-mock-linux-amd64-static'
        elif system == 'darwin':
            bin_file = 'kinesis-mock-macos-amd64-dynamic'
        else:
            bin_file = 'kinesis-mock.jar'
    else:
        bin_file = 'kinesis-mock.jar'

    bin_file_path = os.path.join(target_dir, bin_file)
    if os.path.exists(bin_file_path):
        LOG.debug('kinesis-mock found at %s', bin_file_path)
        return bin_file_path

    response = requests.get(KINESIS_MOCK_RELEASE_URL)
    if not response.ok:
        raise ValueError(
            'Could not get list of releases from %s: %s' % (KINESIS_MOCK_RELEASE_URL, response.text))

    github_release = response.json()
    download_url = None
    for asset in github_release.get('assets', []):
        # find the correct binary in the release
        if asset['name'] == bin_file:
            download_url = asset['browser_download_url']
            break

    if download_url is None:
        raise ValueError(
            'could not find required binary %s in release %s' % (bin_file, KINESIS_MOCK_RELEASE_URL))

    mkdir(target_dir)
    LOG.info('downloading kinesis-mock binary from %s', download_url)
    download(download_url, bin_file_path)
    chmod_r(bin_file_path, 0o777)
    return bin_file_path
def init_directories(dirs: Directories):
    """
    Makes sure the directories exist and have the necessary permissions.

    :param dirs: the Directories record (install/data/tmp/backup paths) to prepare
    """
    LOG.debug("initializing elasticsearch directories %s", dirs)
    chmod_r(dirs.install, 0o777)

    if not dirs.data.startswith(config.dirs.data):
        # only clear previous data if it's not in DATA_DIR
        rm_rf(dirs.data)

    rm_rf(dirs.tmp)
    mkdir(dirs.tmp)
    chmod_r(dirs.tmp, 0o777)

    mkdir(dirs.data)
    chmod_r(dirs.data, 0o777)

    mkdir(dirs.backup)
    chmod_r(dirs.backup, 0o777)

    # clear potentially existing lock files (which cause problems since ES 7.10)
    # bugfix: the walk previously rebound the name `dirs`, shadowing the
    # Directories parameter inside the loop body
    for root, _subdirs, files in os.walk(dirs.data, topdown=True):
        for f in files:
            if f.endswith(".lock"):
                rm_rf(os.path.join(root, f))
def create_lambda_archive(script, get_content=False, libs=None, runtime=None, file_name=None):
    """Utility method to create a Lambda function archive.

    :param script: handler source code written into the archive
    :param get_content: if True, return the zip content; otherwise the zip file path
    :param libs: optional list of library/module names to bundle into the archive
    :param runtime: Lambda runtime identifier (defaults to LAMBDA_DEFAULT_RUNTIME)
    :param file_name: handler file name inside the archive (derived from runtime if omitted)
    """
    # bugfix: avoid the shared mutable default argument `libs=[]`
    # (matches the None-default idiom used by the typed variant of this helper)
    if libs is None:
        libs = []
    runtime = runtime or LAMBDA_DEFAULT_RUNTIME
    tmp_dir = tempfile.mkdtemp(prefix=ARCHIVE_DIR_PREFIX)
    TMP_FILES.append(tmp_dir)
    file_name = file_name or get_handler_file_from_name(LAMBDA_DEFAULT_HANDLER, runtime=runtime)
    script_file = os.path.join(tmp_dir, file_name)
    if os.path.sep in script_file:
        mkdir(os.path.dirname(script_file))
        # create __init__.py files along the path to allow Python imports
        path = file_name.split(os.path.sep)
        for i in range(1, len(path)):
            save_file(os.path.join(tmp_dir, *(path[:i] + ["__init__.py"])), "")
    save_file(script_file, script)
    chmod_r(script_file, 0o777)
    # copy libs
    for lib in libs:
        # candidate locations: a package dir, a single module file, or the
        # resolved module file if the lib is importable here
        paths = [lib, "%s.py" % lib]
        try:
            module = importlib.import_module(lib)
            paths.append(module.__file__)
        except Exception:
            pass
        target_dir = tmp_dir
        root_folder = os.path.join(LOCALSTACK_VENV_FOLDER, "lib/python*/site-packages")
        if lib == "localstack":
            # special-case: bundle a trimmed copy of the localstack package itself
            paths = ["localstack/*.py", "localstack/utils"]
            root_folder = LOCALSTACK_ROOT_FOLDER
            target_dir = os.path.join(tmp_dir, lib)
            mkdir(target_dir)
        for path in paths:
            file_to_copy = path if path.startswith("/") else os.path.join(root_folder, path)
            for file_path in glob.glob(file_to_copy):
                name = os.path.join(target_dir, file_path.split(os.path.sep)[-1])
                if os.path.isdir(file_path):
                    copy_dir(file_path, name)
                else:
                    shutil.copyfile(file_path, name)
    # create zip file
    result = create_zip_file(tmp_dir, get_content=get_content)
    return result
def start_kinesis_mock(port=None, asynchronous=False, update_listener=None):
    """Install (if needed) and start kinesis-mock behind the edge proxy.

    :param port: public port to expose (defaults to config.PORT_KINESIS)
    :param asynchronous: forwarded to do_run (run process in the background)
    :param update_listener: proxy listener forwarded to start_proxy_for_service
    :return: the handle returned by do_run for the kinesis-mock process
    """
    kinesis_mock_bin = install.install_kinesis_mock()
    port = port or config.PORT_KINESIS
    backend_port = get_free_tcp_port()
    kinesis_data_dir_param = ''
    if config.DATA_DIR:
        kinesis_data_dir = '%s/kinesis' % config.DATA_DIR
        mkdir(kinesis_data_dir)
        # FIXME: workaround for https://github.com/localstack/localstack/issues/4227
        # seed an empty streams file so kinesis-mock can start with persistence enabled
        streams_file = os.path.join(kinesis_data_dir, 'kinesis-data.json')
        if not os.path.exists(streams_file):
            with open(streams_file, 'w') as fd:
                fd.write('{"streams":{}}')
        kinesis_data_dir_param = 'SHOULD_PERSIST_DATA=true PERSIST_PATH=%s' % kinesis_data_dir
    # map LocalStack log config onto kinesis-mock's LOG_LEVEL values
    if not config.LS_LOG:
        log_level = 'INFO'
    elif config.LS_LOG == 'warning':
        log_level = 'WARN'
    else:
        log_level = config.LS_LOG.upper()
    log_level_param = 'LOG_LEVEL=%s' % log_level
    # apply the configured artificial latency to all stream operations
    latency = config.KINESIS_LATENCY + 'ms'
    latency_param = 'CREATE_STREAM_DURATION=%s DELETE_STREAM_DURATION=%s REGISTER_STREAM_CONSUMER_DURATION=%s ' \
        'START_STREAM_ENCRYPTION_DURATION=%s STOP_STREAM_ENCRYPTION_DURATION=%s ' \
        'DEREGISTER_STREAM_CONSUMER_DURATION=%s MERGE_SHARDS_DURATION=%s SPLIT_SHARD_DURATION=%s ' \
        'UPDATE_SHARD_COUNT_DURATION=%s' \
        % (latency, latency, latency, latency, latency, latency, latency, latency, latency)
    if config.KINESIS_INITIALIZE_STREAMS != '':
        initialize_streams_param = 'INITIALIZE_STREAMS=%s' % (config.KINESIS_INITIALIZE_STREAMS)
    else:
        initialize_streams_param = ''
    if kinesis_mock_bin.endswith('.jar'):
        # JAR fallback: run via java with G1 garbage collector
        cmd = 'KINESIS_MOCK_PLAIN_PORT=%s SHARD_LIMIT=%s %s %s %s %s java -XX:+UseG1GC -jar %s' \
            % (backend_port, config.KINESIS_SHARD_LIMIT, latency_param, kinesis_data_dir_param,
               log_level_param, initialize_streams_param, kinesis_mock_bin)
    else:
        # native binary: make sure it is executable
        chmod_r(kinesis_mock_bin, 0o777)
        cmd = 'KINESIS_MOCK_PLAIN_PORT=%s SHARD_LIMIT=%s %s %s %s %s %s --gc=G1' \
            % (backend_port, config.KINESIS_SHARD_LIMIT, latency_param, kinesis_data_dir_param,
               log_level_param, initialize_streams_param, kinesis_mock_bin)
    LOGGER.info('starting kinesis-mock proxy %d:%d with cmd: %s', port, backend_port, cmd)
    start_proxy_for_service('kinesis', port, backend_port, update_listener)
    return do_run(cmd, asynchronous)
def install_opensearch(version=None):
    """Install the requested OpenSearch version into its versioned install dir.

    Thread-safe: uses a per-version lock with a double-checked existence test,
    so concurrent callers install each version at most once.

    :param version: OpenSearch version (defaults to OPENSEARCH_DEFAULT_VERSION)
    """
    # locally import to avoid having a dependency on ASF when starting the CLI
    from localstack.aws.api.opensearch import EngineType
    from localstack.services.opensearch import versions

    if not version:
        version = OPENSEARCH_DEFAULT_VERSION

    version = get_opensearch_install_version(version)
    install_dir = get_opensearch_install_dir(version)
    installed_executable = os.path.join(install_dir, "bin", "opensearch")
    if not os.path.exists(installed_executable):
        # double-checked locking: re-test existence after acquiring the per-version lock
        with OS_INSTALL_LOCKS.setdefault(version, threading.Lock()):
            if not os.path.exists(installed_executable):
                log_install_msg("OpenSearch (%s)" % version)
                opensearch_url = versions.get_download_url(version, EngineType.OpenSearch)
                install_dir_parent = os.path.dirname(install_dir)
                mkdir(install_dir_parent)
                # download and extract archive
                tmp_archive = os.path.join(config.dirs.tmp, f"localstack.{os.path.basename(opensearch_url)}")
                download_and_extract_with_retry(opensearch_url, tmp_archive, install_dir_parent)
                # the archive unpacks into a version-suffixed folder -> move to the install dir
                opensearch_dir = glob.glob(os.path.join(install_dir_parent, "opensearch*"))
                if not opensearch_dir:
                    raise Exception("Unable to find OpenSearch folder in %s" % install_dir_parent)
                shutil.move(opensearch_dir[0], install_dir)

                # pre-create runtime directories with permissive access rights
                for dir_name in ("data", "logs", "modules", "plugins", "config/scripts"):
                    dir_path = os.path.join(install_dir, dir_name)
                    mkdir(dir_path)
                    chmod_r(dir_path, 0o777)

                # patch JVM options file - replace hardcoded heap size settings
                jvm_options_file = os.path.join(install_dir, "config", "jvm.options")
                if os.path.exists(jvm_options_file):
                    jvm_options = load_file(jvm_options_file)
                    # comment out lines like '-Xms1g' / '-Xmx1g' (multiline match)
                    jvm_options_replaced = re.sub(
                        r"(^-Xm[sx][a-zA-Z0-9\.]+$)", r"# \1", jvm_options, flags=re.MULTILINE)
                    if jvm_options != jvm_options_replaced:
                        save_file(jvm_options_file, jvm_options_replaced)
def install_elasticsearch():
    """Set up a local Elasticsearch installation under INSTALL_DIR_ES if missing."""
    if os.path.exists(INSTALL_DIR_ES):
        return
    LOGGER.info('Downloading and installing local Elasticsearch server. This may take some time.')
    mkdir(INSTALL_DIR_INFRA)
    # download and extract the distribution archive
    download_and_extract_with_retry(ELASTICSEARCH_JAR_URL, TMP_ARCHIVE_ES, INSTALL_DIR_INFRA)
    # rename the extracted, version-suffixed directory to a fixed name
    run('cd %s && mv elasticsearch* elasticsearch' % (INSTALL_DIR_INFRA))
    runtime_dirs = ('data', 'logs', 'modules', 'plugins', 'config/scripts')
    for runtime_dir in runtime_dirs:
        runtime_path = '%s/%s' % (INSTALL_DIR_ES, runtime_dir)
        mkdir(runtime_path)
        chmod_r(runtime_path, 0o777)
def install_terraform() -> str:
    """Download and unpack the pinned Terraform binary; return its path."""
    if not os.path.isfile(TERRAFORM_BIN):
        log_install_msg(f"Installing terraform {TERRAFORM_VERSION}")
        # build the platform-specific download URL
        url = TERRAFORM_URL_TEMPLATE.format(
            version=TERRAFORM_VERSION, os=platform.system().lower(), arch=get_arch()
        )
        download_and_extract(url, os.path.dirname(TERRAFORM_BIN))
        chmod_r(TERRAFORM_BIN, 0o777)
    return TERRAFORM_BIN
def save_startup_info():
    """Append a StartupInfo record to the startup-info file and return it."""
    from localstack_ext import __version__ as localstack_ext_version

    startup_info = StartupInfo(
        timestamp=datetime.datetime.now().isoformat(),
        localstack_version=constants.VERSION,
        localstack_ext_version=localstack_ext_version,
        pro_activated=is_env_true(constants.ENV_PRO_ACTIVATED),
    )
    LOG.debug("saving startup info %s", startup_info)
    target_file = os.path.join(config.dirs.data, STARTUP_INFO_FILE)
    try:
        _append_startup_info(target_file, startup_info)
    except IOError as exc:
        # best-effort: log and continue, startup must not fail on this
        LOG.error("could not save startup info: %s", exc)

    chmod_r(target_file, 0o777)
    return startup_info
def _create_shell_command(self) -> str:
    """
    helper method for creating kinesis mock invocation command

    Builds an env-var-prefixed shell string: persistence, log level, latency
    and stream-initialization parameters, followed by the binary (or a
    `java -jar` fallback when the install resolved to the JAR).
    """
    if self._data_dir:
        kinesis_data_dir_param = "SHOULD_PERSIST_DATA=true PERSIST_PATH=%s" % self._data_dir
    else:
        kinesis_data_dir_param = ""
    log_level_param = "LOG_LEVEL=%s" % self._log_level
    # apply the same artificial latency to all stream operations
    latency_param = (
        "CREATE_STREAM_DURATION={l} DELETE_STREAM_DURATION={l} REGISTER_STREAM_CONSUMER_DURATION={l} "
        "START_STREAM_ENCRYPTION_DURATION={l} STOP_STREAM_ENCRYPTION_DURATION={l} "
        "DEREGISTER_STREAM_CONSUMER_DURATION={l} MERGE_SHARDS_DURATION={l} SPLIT_SHARD_DURATION={l} "
        "UPDATE_SHARD_COUNT_DURATION={l}").format(l=self._latency)
    init_streams_param = (
        "INITIALIZE_STREAMS=%s" % self._initialize_streams if self._initialize_streams else "")
    if self._bin_path.endswith(".jar"):
        # JAR fallback: run via java with G1 garbage collector
        cmd = ("KINESIS_MOCK_PLAIN_PORT=%s SHARD_LIMIT=%s %s %s %s %s java -XX:+UseG1GC -jar %s" % (
            self.port,
            config.KINESIS_SHARD_LIMIT,
            latency_param,
            kinesis_data_dir_param,
            log_level_param,
            init_streams_param,
            self._bin_path,
        ))
    else:
        # native binary: make sure it is executable
        chmod_r(self._bin_path, 0o777)
        cmd = "KINESIS_MOCK_PLAIN_PORT=%s SHARD_LIMIT=%s %s %s %s %s %s --gc=G1" % (
            self.port,
            config.KINESIS_SHARD_LIMIT,
            latency_param,
            kinesis_data_dir_param,
            log_level_param,
            init_streams_param,
            self._bin_path,
        )
    return cmd
def install_local_kms():
    """Fetch the local-KMS binary matching the current OS/CPU if not installed."""
    local_arch = get_arch()
    binary_path = INSTALL_PATH_KMS_BINARY_PATTERN.replace('<arch>', local_arch)
    if os.path.exists(binary_path):
        return
    log_install_msg('KMS')
    mkdir(INSTALL_DIR_KMS)
    # map the detected arch string onto the download URL placeholders
    if local_arch == 'osx':
        operating_system, alpine = 'darwin', ''
    elif local_arch == 'alpine':
        operating_system, alpine = 'linux', '-alpine'
    else:
        operating_system, alpine = local_arch, ''
    processor = 'arm64' if is_aarch64() else 'amd64'
    kms_url = KMS_URL_PATTERN.replace('<os>', operating_system)
    kms_url = kms_url.replace('<processor>', processor).replace('<alpine>', alpine)
    download(kms_url, binary_path)
    chmod_r(binary_path, 0o777)
def prepare_docker_start():
    """Prepare the host environment prior to launching the LocalStack container."""
    container_name = config.MAIN_CONTAINER_NAME
    if DOCKER_CLIENT.is_container_running(container_name):
        raise ContainerExists('LocalStack container named "%s" is already running' % container_name)

    tmp_folder_mismatch = config.TMP_FOLDER != config.HOST_TMP_FOLDER
    if tmp_folder_mismatch and not config.LAMBDA_REMOTE_DOCKER:
        # Logger is not initialized at this point, so the warning is displayed via print
        print(
            f"WARNING: The detected temp folder for localstack ({config.TMP_FOLDER}) is not equal to the "
            f"HOST_TMP_FOLDER environment variable set ({config.HOST_TMP_FOLDER})."
        )

    os.environ[ENV_SCRIPT_STARTING_DOCKER] = "1"

    # make sure temp folder exists
    mkdir(config.TMP_FOLDER)
    try:
        chmod_r(config.TMP_FOLDER, 0o777)
    except Exception:
        # permission fixing is best-effort only
        pass
def _create_shell_command(self) -> Tuple[List, Dict]:
    """
    Build the kinesis mock invocation.

    :return: tuple of (command list, dictionary with the environment variables)
    """
    latency_keys = (
        "CREATE_STREAM_DURATION",
        "DELETE_STREAM_DURATION",
        "REGISTER_STREAM_CONSUMER_DURATION",
        "START_STREAM_ENCRYPTION_DURATION",
        "STOP_STREAM_ENCRYPTION_DURATION",
        "DEREGISTER_STREAM_CONSUMER_DURATION",
        "MERGE_SHARDS_DURATION",
        "SPLIT_SHARD_DURATION",
        "UPDATE_SHARD_COUNT_DURATION",
    )
    env_vars = {
        "KINESIS_MOCK_PLAIN_PORT": self.port,
        "SHARD_LIMIT": config.KINESIS_SHARD_LIMIT,
    }
    # same artificial latency for every stream operation
    env_vars.update({key: self._latency for key in latency_keys})

    if self._data_dir:
        # enable file-based persistence of stream data
        env_vars.update(
            {
                "SHOULD_PERSIST_DATA": "true",
                "PERSIST_PATH": self._data_dir,
                "PERSIST_INTERVAL": config.KINESIS_MOCK_PERSIST_INTERVAL,
            }
        )
    env_vars["LOG_LEVEL"] = self._log_level
    if self._initialize_streams:
        env_vars["INITIALIZE_STREAMS"] = self._initialize_streams

    if not self._bin_path.endswith(".jar"):
        # native binary: make sure it is executable
        chmod_r(self._bin_path, 0o777)
        return [self._bin_path, "--gc=G1"], env_vars
    # JAR fallback: run via java with G1 garbage collector
    return ["java", "-XX:+UseG1GC", "-jar", self._bin_path], env_vars
def start_elasticsearch(port=None, version=None, delete_data=True, asynchronous=False, update_listener=None):
    """Install (if needed) and start a local Elasticsearch server behind a proxy.

    Idempotent per process: returns the cached thread handle from STATE if the
    server was already started.

    :param port: public port to expose (defaults to config.PORT_ELASTICSEARCH)
    :param version: ES version to install/start
    :param delete_data: wipe the data dir before start (forced False with DATA_DIR)
    :param asynchronous: forwarded to do_run (run process in the background)
    :param update_listener: proxy listener forwarded to start_proxy_for_service
    :return: the handle returned by do_run for the ES process
    """
    if STATE.get('_thread_'):
        return STATE['_thread_']
    port = port or config.PORT_ELASTICSEARCH
    # delete Elasticsearch data that may be cached locally from a previous test run
    delete_all_elasticsearch_data(version)
    install.install_elasticsearch(version)
    backend_port = get_free_tcp_port()
    base_dir = install.get_elasticsearch_install_dir(version)
    es_data_dir = os.path.join(base_dir, 'data')
    es_tmp_dir = os.path.join(base_dir, 'tmp')
    es_mods_dir = os.path.join(base_dir, 'modules')
    if config.DATA_DIR:
        # persistent data requested -> never delete, store under DATA_DIR
        delete_data = False
        es_data_dir = '%s/elasticsearch' % config.DATA_DIR
    # Elasticsearch 5.x cannot be bound to 0.0.0.0 in some Docker environments,
    # hence we use the default bind address 127.0.0.1 and put a proxy in front of it
    backup_dir = os.path.join(config.TMP_FOLDER, 'es_backup')
    cmd = (('%s/bin/elasticsearch ' +
        '-E http.port=%s -E http.publish_port=%s -E http.compression=false ' +
        '-E path.data=%s -E path.repo=%s') %
        (base_dir, backend_port, backend_port, es_data_dir, backup_dir))
    if os.path.exists(os.path.join(es_mods_dir, 'x-pack-ml')):
        # x-pack-ml is not functional in this setup -> disable it explicitly
        cmd += ' -E xpack.ml.enabled=false'
    env_vars = {
        # allow heap tuning from the outside; default to a small heap
        'ES_JAVA_OPTS': os.environ.get('ES_JAVA_OPTS', '-Xms200m -Xmx600m'),
        'ES_TMPDIR': es_tmp_dir
    }
    LOG.debug('Starting local Elasticsearch (%s port %s)' % (get_service_protocol(), port))
    if delete_data:
        rm_rf(es_data_dir)
    # fix permissions
    chmod_r(base_dir, 0o777)
    mkdir(es_data_dir)
    chmod_r(es_data_dir, 0o777)
    mkdir(es_tmp_dir)
    chmod_r(es_tmp_dir, 0o777)
    # start proxy and ES process
    proxy = start_proxy_for_service('elasticsearch', port, backend_port, update_listener,
        quiet=True, params={'protocol_version': 'HTTP/1.0'})
    STATE['_proxy_'] = proxy
    if is_root():
        # ES refuses to run as root -> drop privileges to the 'localstack' user
        cmd = "su localstack -c '%s'" % cmd
    thread = do_run(cmd, asynchronous, env_vars=env_vars)
    STATE['_thread_'] = thread
    return thread
def install_kinesis_mock(bin_file_path: str = None):
    """
    Download the kinesis-mock binary for this platform from its GitHub release.

    :param bin_file_path: target path for the downloaded binary; defaults to
        kinesis_mock_install_path()
    :raises ValueError: if the release metadata cannot be fetched, or the release
        has no asset matching the expected binary file name
    """
    response = requests.get(KINESIS_MOCK_RELEASE_URL)
    if not response.ok:
        raise ValueError(
            "Could not get list of releases from %s: %s" % (KINESIS_MOCK_RELEASE_URL, response.text))
    bin_file_path = bin_file_path or kinesis_mock_install_path()
    github_release = response.json()
    download_url = None
    bin_file_name = os.path.basename(bin_file_path)
    for asset in github_release.get("assets", []):
        # find the correct binary in the release
        if asset["name"] == bin_file_name:
            download_url = asset["browser_download_url"]
            break
    if download_url is None:
        raise ValueError(
            "could not find required binary %s in release %s" % (bin_file_name, KINESIS_MOCK_RELEASE_URL))
    # fix: create the directory of the *actual* target path; the previous
    # unconditional mkdir(INSTALL_DIR_KINESIS_MOCK) left a caller-supplied
    # bin_file_path outside the default install dir without its parent directory
    mkdir(os.path.dirname(bin_file_path))
    LOG.info("downloading kinesis-mock binary from %s", download_url)
    download(download_url, bin_file_path)
    chmod_r(bin_file_path, 0o777)
def _init_directories(self):
    """
    Prepare the Elasticsearch directory layout: clear stale state from previous
    runs and open up permissions so the (non-root) ES process can write.
    """
    dirs = self.directories
    LOG.debug("initializing elasticsearch directories %s", dirs)
    chmod_r(dirs.base, 0o777)
    # Only preserve previous data if it actually lives inside a configured DATA_DIR.
    # Fix: the former check `dirs.data.startswith(config.DATA_DIR)` was always True
    # when DATA_DIR is the empty string (unset), so stale data was never cleared.
    if not (config.DATA_DIR and dirs.data.startswith(config.DATA_DIR)):
        rm_rf(dirs.data)
    mkdir(dirs.data)
    chmod_r(dirs.data, 0o777)
    # tmp dir is always transient - recreate it from scratch
    rm_rf(dirs.tmp)
    mkdir(dirs.tmp)
    chmod_r(dirs.tmp, 0o777)
def install_elasticsearch(version=None):
    """
    Install the given Elasticsearch version locally (idempotent), including default
    plugins, then trim unused modules and patch the JVM heap settings.

    :param version: requested version; normalized via get_elasticsearch_install_version
    :raises Exception: if no download URL is known for the version, or the extracted
        archive does not contain an `elasticsearch*` folder
    """
    version = get_elasticsearch_install_version(version)
    install_dir = get_elasticsearch_install_dir(version)
    installed_executable = os.path.join(install_dir, "bin", "elasticsearch")
    # presence of the executable marks a completed install - skip the download
    if not os.path.exists(installed_executable):
        log_install_msg("Elasticsearch (%s)" % version)
        es_url = ELASTICSEARCH_URLS.get(version)
        if not es_url:
            raise Exception(
                'Unable to find download URL for Elasticsearch version "%s"' % version)
        install_dir_parent = os.path.dirname(install_dir)
        mkdir(install_dir_parent)
        # download and extract archive
        tmp_archive = os.path.join(config.TMP_FOLDER, "localstack.%s" % os.path.basename(es_url))
        download_and_extract_with_retry(es_url, tmp_archive, install_dir_parent)
        # the archive extracts to a versioned folder name - locate and rename it
        elasticsearch_dir = glob.glob(
            os.path.join(install_dir_parent, "elasticsearch*"))
        if not elasticsearch_dir:
            raise Exception("Unable to find Elasticsearch folder in %s" % install_dir_parent)
        shutil.move(elasticsearch_dir[0], install_dir)
        # pre-create runtime dirs with open permissions (ES runs as a non-root user)
        for dir_name in ("data", "logs", "modules", "plugins", "config/scripts"):
            dir_path = os.path.join(install_dir, dir_name)
            mkdir(dir_path)
            chmod_r(dir_path, 0o777)
        # install default plugins
        for plugin in ELASTICSEARCH_PLUGIN_LIST:
            if is_alpine():
                # https://github.com/pires/docker-elasticsearch/issues/56
                os.environ["ES_TMPDIR"] = "/tmp"
            plugin_binary = os.path.join(install_dir, "bin", "elasticsearch-plugin")
            plugin_dir = os.path.join(install_dir, "plugins", plugin)
            if not os.path.exists(plugin_dir):
                LOG.info("Installing Elasticsearch plugin %s" % plugin)

                def try_install():
                    # -b: batch mode, auto-confirm any permission prompts
                    safe_run([plugin_binary, "install", "-b", plugin])

                # We're occasionally seeing javax.net.ssl.SSLHandshakeException -> add download retries
                download_attempts = 3
                try:
                    retry(try_install, retries=download_attempts - 1, sleep=2)
                except Exception:
                    LOG.warning("Unable to download Elasticsearch plugin '%s' after %s attempts" % (
                        plugin, download_attempts))
                    if not os.environ.get("IGNORE_ES_DOWNLOAD_ERRORS"):
                        raise
    # delete some plugins to free up space
    for plugin in ELASTICSEARCH_DELETE_MODULES:
        module_dir = os.path.join(install_dir, "modules", plugin)
        rm_rf(module_dir)
    # disable x-pack-ml plugin (not working on Alpine)
    xpack_dir = os.path.join(install_dir, "modules", "x-pack-ml", "platform")
    rm_rf(xpack_dir)
    # patch JVM options file - replace hardcoded heap size settings
    jvm_options_file = os.path.join(install_dir, "config", "jvm.options")
    if os.path.exists(jvm_options_file):
        jvm_options = load_file(jvm_options_file)
        # comment out any -Xms/-Xmx lines so heap can be driven by ES_JAVA_OPTS
        jvm_options_replaced = re.sub(r"(^-Xm[sx][a-zA-Z0-9\.]+$)", r"# \1", jvm_options, flags=re.MULTILINE)
        if jvm_options != jvm_options_replaced:
            save_file(jvm_options_file, jvm_options_replaced)
# NOTE(review): this is a fragment of a legacy start_elasticsearch body - its
# `def` line and the earlier bindings of `es_data_dir`, `backend_port`, `port`,
# `delete_data` and `update_listener` are not visible here (TODO: confirm against
# the enclosing function). Also, `async` as a bare identifier (see do_run call
# below) only parses on Python < 3.7.
es_tmp_dir = '%s/infra/elasticsearch/tmp' % (ROOT_PATH)
if DATA_DIR:
    # presumably redirects ES data under the configured DATA_DIR for persistence
    es_data_dir = '%s/elasticsearch' % DATA_DIR
# Elasticsearch 5.x cannot be bound to 0.0.0.0 in some Docker environments,
# hence we use the default bind address 127.0.0.0 and put a proxy in front of it
cmd = ((
    'ES_JAVA_OPTS=\"$ES_JAVA_OPTS -Xms200m -Xmx500m\" ES_TMPDIR="%s" ' +
    '%s/infra/elasticsearch/bin/elasticsearch ' +
    '-E http.port=%s -E http.publish_port=%s -E http.compression=false -E path.data=%s'
) % (es_tmp_dir, ROOT_PATH, backend_port, backend_port, es_data_dir))
print('Starting local Elasticsearch (%s port %s)...' % (get_service_protocol(), port))
if delete_data:
    run('rm -rf %s' % es_data_dir)
# fix permissions
chmod_r('%s/infra/elasticsearch' % ROOT_PATH, 0o777)
mkdir(es_data_dir)
chmod_r(es_data_dir, 0o777)
# start proxy and ES process
start_proxy_for_service('elasticsearch', port, backend_port, update_listener,
    quiet=True, params={'protocol_version': 'HTTP/1.0'})
if is_root():
    # ES refuses to run as root - drop privileges to the localstack user
    cmd = "su -c '%s' localstack" % cmd
thread = do_run(cmd, async)
return thread