Example 1: caching a generated machine ID
def get_machine_id() -> str:
    cache_path = os.path.join(config.dirs.cache, "machine.json")
    doc = FileMappedDocument(cache_path)

    if "machine_id" not in doc:
        # generate a new machine ID
        doc["machine_id"] = _generate_machine_id()
        # try to cache it; call_safe swallows any error, so a failed write is non-fatal
        call_safe(doc.save)

    return doc["machine_id"]
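
Every example here routes best-effort calls (cache writes, lifecycle hooks, shutdowns) through call_safe, which runs a callable and keeps going even if it raises. Its definition is not shown above; here is a minimal sketch of the assumed behavior (the logger name and exact signature are assumptions, not the real localstack API):

import logging
from typing import Any, Callable, Optional

LOG = logging.getLogger(__name__)


def call_safe(func: Callable, args: tuple = (), kwargs: dict = None) -> Optional[Any]:
    # run func with the given arguments; log and swallow any exception,
    # returning None instead of propagating the error
    try:
        return func(*args, **(kwargs or {}))
    except Exception:
        LOG.exception("error calling %s", func)
        return None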
Example 2: index multiplexing across two Elasticsearch domains
    def test_multiplexing_cluster(self, monkeypatch):
        monkeypatch.setattr(config, "ES_ENDPOINT_STRATEGY", "domain")
        monkeypatch.setattr(config, "ES_MULTI_CLUSTER", False)

        manager = MultiplexingClusterManager()

        # create two elasticsearch domains
        domain0_name = f"domain-{short_uid()}"
        domain1_name = f"domain-{short_uid()}"
        domain0_arn = get_domain_arn(domain0_name, "us-east-1",
                                     TEST_AWS_ACCOUNT_ID)
        domain1_arn = get_domain_arn(domain1_name, "us-east-1",
                                     TEST_AWS_ACCOUNT_ID)
        cluster0 = manager.create(domain0_arn, dict(DomainName=domain0_name))
        cluster1 = manager.create(domain1_arn, dict(DomainName=domain1_name))

        try:
            # spawn the two clusters
            assert cluster0.wait_is_up(240)
            assert cluster1.wait_is_up(240)

            retry(lambda: try_cluster_health(cluster0.url),
                  retries=12,
                  sleep=10)
            retry(lambda: try_cluster_health(cluster1.url),
                  retries=12,
                  sleep=10)

            # create an index in cluster0, wait for it to appear, and make sure it shows up in cluster1 too
            index0_url = cluster0.url + "/my-index?pretty"
            index1_url = cluster1.url + "/my-index?pretty"

            response = requests.put(index0_url)
            assert response.ok, f"failed to put index into cluster {cluster0.url}: {response.text}"
            assert poll_condition(lambda: requests.head(index0_url).ok,
                                  timeout=10), "gave up waiting for index"

            assert requests.head(
                index1_url).ok, "expected index to appear by multiplexing"

        finally:
            call_safe(cluster0.shutdown)
            call_safe(cluster1.shutdown)
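
The assertion at the end is the point of the test: with the multiplexing manager, both domains are served by one backing cluster, so an index created through one endpoint is visible through the other. A toy illustration of that idea (this is not LocalStack's implementation; the class and names below are made up):

class MultiplexingSketch:
    """Toy model of the multiplexing strategy: many domains, one backend."""

    def __init__(self):
        self.indices = set()  # state of the single backing cluster
        self.domains = {}     # domain ARN -> handle onto the shared state

    def create(self, arn: str):
        # every domain gets its own entry, but all entries share the same cluster state
        self.domains[arn] = self.indices
        return self.domains[arn]


manager = MultiplexingSketch()
a = manager.create("arn:aws:es:us-east-1:000000000000:domain/a")
b = manager.create("arn:aws:es:us-east-1:000000000000:domain/b")
a.add("my-index")
assert "my-index" in b  # the index "appears" in the second domain as well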
Example 3: index multiplexing across two OpenSearch domains
    def test_multiplexing_cluster(self, monkeypatch):
        monkeypatch.setattr(config, "OPENSEARCH_ENDPOINT_STRATEGY", "domain")
        monkeypatch.setattr(config, "OPENSEARCH_MULTI_CLUSTER", False)

        manager = MultiplexingClusterManager()

        # create two opensearch domains
        domain_key_0 = DomainKey(domain_name=f"domain-{short_uid()}",
                                 region="us-east-1",
                                 account=TEST_AWS_ACCOUNT_ID)
        domain_key_1 = DomainKey(domain_name=f"domain-{short_uid()}",
                                 region="us-east-1",
                                 account=TEST_AWS_ACCOUNT_ID)
        cluster_0 = manager.create(domain_key_0.arn,
                                   OPENSEARCH_DEFAULT_VERSION)
        cluster_1 = manager.create(domain_key_1.arn,
                                   OPENSEARCH_DEFAULT_VERSION)

        try:
            # spawn the two clusters
            assert cluster_0.wait_is_up(240)
            assert cluster_1.wait_is_up(240)

            retry(lambda: try_cluster_health(cluster_0.url),
                  retries=12,
                  sleep=10)
            retry(lambda: try_cluster_health(cluster_1.url),
                  retries=12,
                  sleep=10)

            # create an index in cluster_0, wait for it to appear, make sure it's in cluster_1, too
            index_url_0 = cluster_0.url + "/my-index?pretty"
            index_url_1 = cluster_1.url + "/my-index?pretty"

            response = requests.put(index_url_0)
            assert response.ok, f"failed to put index into cluster {cluster_0.url}: {response.text}"
            assert poll_condition(lambda: requests.head(index_url_0).ok,
                                  timeout=10), "gave up waiting for index"

            assert requests.head(
                index_url_1).ok, "index should appear in second cluster"

        finally:
            call_safe(cluster_0.shutdown)
            call_safe(cluster_1.shutdown)
Example 4: Service.__init__ with a lifecycle hook
    def __init__(
        self,
        name,
        start=_default,
        check=_default,
        listener=None,
        active=False,
        stop=None,
        lifecycle_hook: ServiceLifecycleHook = None,
    ):
        self.plugin_name = name
        self.start_function = start
        self.listener = listener
        # fall back to a generic local API check when no check function is given
        self.check_function = check if check is not _default else local_api_checker(name)
        self.default_active = active
        self.stop_function = stop
        self.lifecycle_hook = lifecycle_hook or ServiceLifecycleHook()
        call_safe(self.lifecycle_hook.on_after_init)
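
Wrapping on_after_init in call_safe means a misbehaving hook cannot break service construction. The hook type itself is not shown; judging by examples 4, 5, and 8 it is a base class with no-op callbacks that concrete services override. A sketch under that assumption (only the three method names are taken from the examples):

class ServiceLifecycleHook:
    # no-op base class; services override the callbacks they care about

    def on_after_init(self):
        pass

    def on_before_start(self):
        pass

    def on_before_stop(self):
        pass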
Example 5: Service.start
    def start(self, asynchronous):
        call_safe(self.lifecycle_hook.on_before_start)

        if not self.start_function:
            return

        if self.start_function is _default:
            # fallback start behavior: register the listener as a proxy listener, if one exists
            if not self.listener:
                return

            from localstack.services.infra import add_service_proxy_listener

            add_service_proxy_listener(self.plugin_name, self.listener)
            return

        kwargs = {"asynchronous": asynchronous}
        if self.listener:
            kwargs["update_listener"] = self.listener
        return self.start_function(**kwargs)
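
This method implies the contract for plugin start functions: they are called with an asynchronous keyword, plus update_listener when the service registered a listener. A hypothetical start function matching that contract (the name and body are made up):

def start_my_service(asynchronous=False, update_listener=None):
    # hypothetical plugin entry point; Service.start builds exactly these kwargs
    if update_listener is not None:
        pass  # wire the listener into the service's proxy here
    # start the real backend, in a background thread when asynchronous=True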
Example 6: singleton cluster with the "port" endpoint strategy
    def test_endpoint_strategy_port_singleton_cluster(self, monkeypatch):
        monkeypatch.setattr(config, "OPENSEARCH_ENDPOINT_STRATEGY", "port")
        monkeypatch.setattr(config, "OPENSEARCH_MULTI_CLUSTER", False)

        manager = SingletonClusterManager()

        # create two opensearch domains
        domain_key_0 = DomainKey(domain_name=f"domain-{short_uid()}",
                                 region="us-east-1",
                                 account=TEST_AWS_ACCOUNT_ID)
        domain_key_1 = DomainKey(domain_name=f"domain-{short_uid()}",
                                 region="us-east-1",
                                 account=TEST_AWS_ACCOUNT_ID)
        cluster_0 = manager.create(domain_key_0.arn,
                                   OPENSEARCH_DEFAULT_VERSION)
        cluster_1 = manager.create(domain_key_1.arn,
                                   OPENSEARCH_DEFAULT_VERSION)

        # check that the first cluster's URL uses a port from the external service port range
        parts = cluster_0.url.split(":")
        assert parts[0] == "http"
        assert parts[1] == "//localhost"
        assert int(parts[2]) in range(config.EXTERNAL_SERVICE_PORTS_START,
                                      config.EXTERNAL_SERVICE_PORTS_END)

        # check that the second URL matches the first one: the singleton manager reuses the cluster
        assert cluster_0.url == cluster_1.url

        try:
            # wait for the single shared cluster to come up
            assert cluster_0.wait_is_up(240)
            # make sure the cluster (shared by cluster_0 and cluster_1) is reachable
            retry(lambda: try_cluster_health(cluster_0.url),
                  retries=3,
                  sleep=5)
        finally:
            call_safe(cluster_0.shutdown)
            call_safe(cluster_1.shutdown)
Example 7: shutting down all registered clusters
    def shutdown_all(self):
        # pop entries one by one instead of iterating, so the registry is never
        # mutated while being iterated over
        while self.clusters:
            domain, cluster = self.clusters.popitem()
            call_safe(cluster.shutdown)
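
The while/popitem loop removes each entry before shutting it down, so a cluster whose shutdown fails (the error is absorbed by call_safe) is still dropped from the registry, and the dict is never modified mid-iteration. The same pattern in isolation, with made-up data:

registry = {"domain-a": "cluster-a", "domain-b": "cluster-b"}
while registry:
    key, value = registry.popitem()
    # the entry is already removed here, so even if the per-item work below
    # raises (and is swallowed), it will not be retried on the next pass
    print(f"shutting down {key} ({value})")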
Example 8: Service.stop
    def stop(self):
        call_safe(self.lifecycle_hook.on_before_stop)
        if not self.stop_function:
            return
        return self.stop_function()
Example 9: custom backend served by a fake HTTP server
    def test_custom_backend(self, httpserver, monkeypatch):
        monkeypatch.setattr(config, "OPENSEARCH_ENDPOINT_STRATEGY", "domain")
        monkeypatch.setattr(config, "OPENSEARCH_CUSTOM_BACKEND", httpserver.url_for("/"))

        # create a fake cluster backend by stubbing its root info and health endpoints
        httpserver.expect_request("/").respond_with_json(
            {
                "name": "om",
                "cluster_name": "opensearch",
                "cluster_uuid": "gREewvVZR0mIswR-8-6VRQ",
                "version": {
                    "number": "7.10.0",
                    "build_flavor": "default",
                    "build_type": "tar",
                    "build_hash": "51e9d6f22758d0374a0f3f5c6e8f3a7997850f96",
                    "build_date": "2020-11-09T21:30:33.964949Z",
                    "build_snapshot": False,
                    "lucene_version": "8.7.0",
                    "minimum_wire_compatibility_version": "6.8.0",
                    "minimum_index_compatibility_version": "6.0.0-beta1",
                },
                "tagline": "You Know, for Search",
            }
        )
        httpserver.expect_request("/_cluster/health").respond_with_json(
            {
                "cluster_name": "opensearch",
                "status": "green",
                "timed_out": False,
                "number_of_nodes": 1,
                "number_of_data_nodes": 1,
                "active_primary_shards": 0,
                "active_shards": 0,
                "relocating_shards": 0,
                "initializing_shards": 0,
                "unassigned_shards": 0,
                "delayed_unassigned_shards": 0,
                "number_of_pending_tasks": 0,
                "number_of_in_flight_fetch": 0,
                "task_max_waiting_in_queue_millis": 0,
                "active_shards_percent_as_number": 100,
            }
        )

        manager = create_cluster_manager()
        assert isinstance(manager, CustomBackendManager)

        domain_key = DomainKey(
            domain_name=f"domain-{short_uid()}", region="us-east-1", account=TEST_AWS_ACCOUNT_ID
        )
        cluster = manager.create(domain_key.arn, OPENSEARCH_DEFAULT_VERSION)
        # check that we're using the domain endpoint strategy
        assert f"{domain_key.domain_name}." in cluster.url

        try:
            assert cluster.wait_is_up(10)
            retry(lambda: try_cluster_health(cluster.url), retries=3, sleep=5)

        finally:
            call_safe(cluster.shutdown)

        httpserver.check()
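
The fake backend only needs to satisfy whatever try_cluster_health asks of it, which given the /_cluster/health stub above is presumably a GET on that path followed by a status check. A plausible sketch of that helper (assumed, not the real implementation):

import requests


def try_cluster_health(cluster_url: str) -> None:
    # assumed helper: fail unless the cluster reports a usable status
    resp = requests.get(f"{cluster_url}/_cluster/health")
    assert resp.ok, f"health request failed: {resp.text}"
    status = resp.json().get("status")
    assert status in ("green", "yellow"), f"cluster status is {status}"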