Example #1
0
    def do_start_thread(self) -> FuncThread:
        """Install Elasticsearch (if needed), build the launch command, and start it.

        Returns the started ShellCommandThread running the ES process.
        """
        # FIXME: if this fails the cluster could be left in a wonky state
        # FIXME: this is not a good place to run install, and it only works because we're
        #  assuming that there will only ever be one running Elasticsearch cluster
        install.install_elasticsearch(self.version)
        self._init_directories()

        command = " ".join(
            self._create_run_command(additional_settings=self.command_settings)
        )

        es_user = constants.OS_USER_ELASTICSEARCH
        if is_root() and es_user:
            # run the elasticsearch process as a non-root user (when running in docker)
            command = f"su {es_user} -c '{command}'"

        env_vars = self._create_env_vars()
        LOG.info("starting elasticsearch: %s with env %s", command, env_vars)

        thread = ShellCommandThread(
            command,
            env_vars=env_vars,
            strip_color=True,
            log_listener=self._log_listener,
        )
        thread.start()
        return thread
Example #2
0
    def _run_elasticsearch(self, *args):
        """Start the Elasticsearch process and block until it terminates.

        *args is necessary for start_thread to work.
        """
        with self._lifecycle_lock:
            if self._elasticsearch_thread:
                # a concurrent caller already started the process; nothing to do
                return

            # FIXME: if this fails the cluster could be left in a wonky state
            # FIXME: this is not a good place to run install, and it only works because we're
            #  assuming that there will only ever be one running Elasticsearch cluster
            install.install_elasticsearch(self.version)
            self._init_directories()

            command = " ".join(
                self._create_run_command(additional_settings=self.command_settings)
            )

            es_user = constants.OS_USER_ELASTICSEARCH
            if is_root() and es_user:
                # run the elasticsearch process as a non-root user (when running in docker)
                command = f"su {es_user} -c '{command}'"

            env_vars = self._create_env_vars()
            LOG.info("starting elasticsearch: %s with env %s", command, env_vars)
            # use asynchronous=True to get a ShellCommandThread
            self._elasticsearch_thread = do_run(command, asynchronous=True, env_vars=env_vars)
            self._starting.set()

        # block until the thread running the command is done
        try:
            self._elasticsearch_thread.join()
        finally:
            LOG.info("elasticsearch process ended")
            self._stopped.set()
Example #3
0
def start_elasticsearch(port=None, delete_data=True, asynchronous=False, update_listener=None):
    """Start a local Elasticsearch process behind an HTTP proxy.

    :param port: external proxy port (defaults to config.PORT_ELASTICSEARCH)
    :param delete_data: if True, wipe the local ES data directory before starting
    :param asynchronous: forwarded to do_run (run the process in a background thread)
    :param update_listener: proxy listener forwarded to start_proxy_for_service
    :return: the handle returned by do_run for the started process
    """
    port = port or config.PORT_ELASTICSEARCH
    # delete Elasticsearch data that may be cached locally from a previous test run
    delete_all_elasticsearch_data()

    install.install_elasticsearch()
    backend_port = DEFAULT_PORT_ELASTICSEARCH_BACKEND
    es_data_dir = '%s/infra/elasticsearch/data' % ROOT_PATH
    es_tmp_dir = '%s/infra/elasticsearch/tmp' % ROOT_PATH
    if config.DATA_DIR:
        es_data_dir = '%s/elasticsearch' % config.DATA_DIR
    # Elasticsearch 5.x cannot be bound to 0.0.0.0 in some Docker environments,
    # hence we use the default bind address 127.0.0.0 and put a proxy in front of it
    cmd = (('%s/infra/elasticsearch/bin/elasticsearch ' +
        '-E http.port=%s -E http.publish_port=%s -E http.compression=false -E path.data=%s') %
        (ROOT_PATH, backend_port, backend_port, es_data_dir))
    env_vars = {
        'ES_JAVA_OPTS': os.environ.get('ES_JAVA_OPTS', '-Xms200m -Xmx600m'),
        'ES_TMPDIR': es_tmp_dir
    }
    print('Starting local Elasticsearch (%s port %s)...' % (get_service_protocol(), port))
    if delete_data:
        # quote the path so the shell command handles directories with spaces
        run('rm -rf "%s"' % es_data_dir)
    # fix permissions
    chmod_r('%s/infra/elasticsearch' % ROOT_PATH, 0o777)
    mkdir(es_data_dir)
    chmod_r(es_data_dir, 0o777)
    # start proxy and ES process
    start_proxy_for_service('elasticsearch', port, backend_port,
        update_listener, quiet=True, params={'protocol_version': 'HTTP/1.0'})
    if is_root():
        # when running as root, drop privileges for the ES process itself
        cmd = "su -c '%s' localstack" % cmd
    thread = do_run(cmd, asynchronous, env_vars=env_vars)
    return thread
Example #4
0
def start_elasticsearch(port=None,
                        version=None,
                        delete_data=True,
                        asynchronous=False,
                        update_listener=None):
    """Start a local Elasticsearch of the given version behind an HTTP proxy.

    The started thread is memoized in STATE, so a second call returns the
    handle of the already-running instance instead of starting another one.
    """
    existing = STATE.get('_thread_')
    if existing:
        return existing

    port = port or config.PORT_ELASTICSEARCH
    # delete Elasticsearch data that may be cached locally from a previous test run
    delete_all_elasticsearch_data(version)

    install.install_elasticsearch(version)
    backend_port = get_free_tcp_port()
    base_dir = install.get_elasticsearch_install_dir(version)
    es_data_dir = os.path.join(base_dir, 'data')
    es_tmp_dir = os.path.join(base_dir, 'tmp')
    es_mods_dir = os.path.join(base_dir, 'modules')
    if config.DATA_DIR:
        # a persistent data dir is configured, so never wipe it on startup
        delete_data = False
        es_data_dir = '%s/elasticsearch' % config.DATA_DIR
    # Elasticsearch 5.x cannot be bound to 0.0.0.0 in some Docker environments,
    # hence we use the default bind address 127.0.0.0 and put a proxy in front of it
    backup_dir = os.path.join(config.TMP_FOLDER, 'es_backup')
    cmd = (
        ('%s/bin/elasticsearch ' +
         '-E http.port=%s -E http.publish_port=%s -E http.compression=false ' +
         '-E path.data=%s -E path.repo=%s') %
        (base_dir, backend_port, backend_port, es_data_dir, backup_dir))
    if os.path.exists(os.path.join(es_mods_dir, 'x-pack-ml')):
        # the x-pack-ml module is bundled with this distribution: disable it
        cmd += ' -E xpack.ml.enabled=false'
    env_vars = {
        'ES_JAVA_OPTS': os.environ.get('ES_JAVA_OPTS', '-Xms200m -Xmx600m'),
        'ES_TMPDIR': es_tmp_dir
    }
    LOG.debug('Starting local Elasticsearch (%s port %s)' %
              (get_service_protocol(), port))
    if delete_data:
        rm_rf(es_data_dir)
    # fix permissions and make sure the working directories exist
    chmod_r(base_dir, 0o777)
    for work_dir in (es_data_dir, es_tmp_dir):
        mkdir(work_dir)
        chmod_r(work_dir, 0o777)
    # start proxy and ES process
    proxy = start_proxy_for_service('elasticsearch',
                                    port,
                                    backend_port,
                                    update_listener,
                                    quiet=True,
                                    params={'protocol_version': 'HTTP/1.0'})
    STATE['_proxy_'] = proxy
    if is_root():
        # drop root privileges for the ES process itself
        cmd = "su localstack -c '%s'" % cmd
    thread = do_run(cmd, asynchronous, env_vars=env_vars)
    STATE['_thread_'] = thread
    return thread
Example #5
0
    def run_install(*args):
        """Install Elasticsearch exactly once; concurrent calls are serialized by INIT_LOCK."""
        with INIT_LOCK:
            if installed.is_set():
                # another caller already finished the installation
                return
            LOG.info("installing elasticsearch")
            install_elasticsearch()
            LOG.info("done installing elasticsearch")
            installed.set()
Example #6
0
 def run_install(*args):
     """Install the default Elasticsearch and OpenSearch versions exactly once.

     Concurrent calls are serialized by INIT_LOCK; the `installed` event makes
     repeated calls a no-op.
     """
     with INIT_LOCK:
         if installed.is_set():
             # an earlier call already performed both installations
             return

         LOG.info("installing elasticsearch default version")
         install_elasticsearch()
         LOG.info("done installing elasticsearch default version")

         LOG.info("installing opensearch default version")
         install_opensearch()
         LOG.info("done installing opensearch default version")

         installed.set()
Example #7
0
def start_elasticsearch_instance(version, startup_delay=8):
    """Install and start the Elasticsearch instance backing the given API version.

    :param version: ES API version string, mapped to a concrete install version
    :param startup_delay: seconds to wait before health-checking the instance
        (generalizes the previously hard-coded 8-second sleep; default unchanged)
    :return: the thread handle of the started Elasticsearch process
    """
    # Note: keep imports here to avoid circular dependencies
    from localstack.services.es import es_starter

    # install ES version
    install_version = get_install_version_for_api_version(version)
    install.install_elasticsearch(install_version)

    thread = es_starter.start_elasticsearch(asynchronous=True, version=install_version)
    # sleep some time to give Elasticsearch enough time to come up
    time.sleep(startup_delay)
    # ensure that all infra components are up and running
    check_infra(apis=[], additional_checks=[es_starter.check_elasticsearch])
    return thread
Example #8
0
def delete_all_elasticsearch_data():
    """Drop ALL data in the local Elasticsearch data folder. Use with caution!"""
    nodes_dir = os.path.join(LOCALSTACK_ROOT_FOLDER, 'infra', 'elasticsearch',
                             'data', 'elasticsearch', 'nodes')
    run('rm -rf "%s"' % nodes_dir)


def start_elasticsearch(port=PORT_ELASTICSEARCH,
                        delete_data=True,
                        async=False,
                        update_listener=None):
    # delete Elasticsearch data that may be cached locally from a previous test run
    delete_all_elasticsearch_data()

    install.install_elasticsearch()
    backend_port = DEFAULT_PORT_ELASTICSEARCH_BACKEND
    es_data_dir = '%s/infra/elasticsearch/data' % (ROOT_PATH)
    es_tmp_dir = '%s/infra/elasticsearch/tmp' % (ROOT_PATH)
    if DATA_DIR:
        es_data_dir = '%s/elasticsearch' % DATA_DIR
    # Elasticsearch 5.x cannot be bound to 0.0.0.0 in some Docker environments,
    # hence we use the default bind address 127.0.0.0 and put a proxy in front of it
    cmd = ((
        'ES_JAVA_OPTS=\"$ES_JAVA_OPTS -Xms200m -Xmx500m\" ES_TMPDIR="%s" ' +
        '%s/infra/elasticsearch/bin/elasticsearch ' +
        '-E http.port=%s -E http.publish_port=%s -E http.compression=false -E path.data=%s'
    ) % (es_tmp_dir, ROOT_PATH, backend_port, backend_port, es_data_dir))
    print('Starting local Elasticsearch (%s port %s)...' %
          (get_service_protocol(), port))
    if delete_data:
Example #9
0
 def _ensure_installed(self):
     """Delegate installation of this instance's configured ES version to the install module."""
     required_version = self.version
     install.install_elasticsearch(required_version)
Example #10
0
 def run_install(*args):
     # Install Elasticsearch while holding INIT_LOCK (serializes concurrent callers).
     # NOTE(review): unlike the other run_install variants in this file, there is
     # no installed.is_set() guard here, so every call re-runs the installation.
     # Confirm whether re-running on each call is intended.
     with INIT_LOCK:
         LOG.info("installing elasticsearch")
         install_elasticsearch()
         LOG.info("done installing elasticsearch")