Example #1
    def test_create_and_delete_log_stream(self, logs_client, logs_log_group):
        test_name = f"test-log-stream-{short_uid()}"

        def count_log_streams():
            # re-query on every call so that poll_condition sees fresh state;
            # polling a snapshot taken once would never observe a change
            return len(
                logs_client.describe_log_streams(
                    logGroupName=logs_log_group).get("logStreams", []))

        count_before = count_log_streams()

        logs_client.create_log_stream(logGroupName=logs_log_group,
                                      logStreamName=test_name)

        assert poll_condition(
            lambda: count_log_streams() == count_before + 1,
            timeout=5.0,
            interval=0.5,
        )

        logs_client.delete_log_stream(logGroupName=logs_log_group,
                                      logStreamName=test_name)

        assert poll_condition(
            lambda: count_log_streams() == count_before,
            timeout=5.0,
            interval=0.5,
        )
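All of these snippets revolve around LocalStack's poll_condition helper. For reference, here is a minimal sketch of an equivalent helper with the signature the examples rely on (poll_condition(condition, timeout=None, interval=0.5) -> bool); this is an illustrative reimplementation, not LocalStack's actual source:

import time
from typing import Callable, Optional


def poll_condition(condition: Callable[[], bool],
                   timeout: Optional[float] = None,
                   interval: float = 0.5) -> bool:
    """Evaluate condition() every `interval` seconds until it returns True.

    Returns False once `timeout` seconds have elapsed without the condition
    holding; waits indefinitely if timeout is None.
    """
    deadline = None if timeout is None else time.time() + timeout
    while not condition():
        if deadline is not None and time.time() >= deadline:
            return False
        time.sleep(interval)
    return True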
Example #2
def dynamodb_create_table(dynamodb_client):
    tables = []

    def factory(**kwargs):
        kwargs["client"] = dynamodb_client
        if "table_name" not in kwargs:
            kwargs["table_name"] = "test-table-%s" % short_uid()
        if "partition_key" not in kwargs:
            kwargs["partition_key"] = "id"

        kwargs["sleep_after"] = 0

        tables.append(kwargs["table_name"])

        return create_dynamodb_table(**kwargs)

    yield factory

    # cleanup
    for table in tables:
        try:
            # table has to be in ACTIVE state before deletion
            def wait_for_table_created():
                return (
                    dynamodb_client.describe_table(TableName=table)["Table"]["TableStatus"]
                    == "ACTIVE"
                )

            poll_condition(wait_for_table_created, timeout=30)
            dynamodb_client.delete_table(TableName=table)
        except Exception as e:
            LOG.debug("error cleaning up table %s: %s", table, e)
Example #3
    def _wait_for_consumer_ready(consumer_arn: str):
        def is_consumer_ready():
            describe_response = kinesis_client.describe_stream_consumer(
                ConsumerARN=consumer_arn)
            return describe_response["ConsumerDescription"][
                "ConsumerStatus"] == "ACTIVE"

        poll_condition(is_consumer_ready)
Example #4
    def _wait_for_stream_ready(stream_name: str):
        def is_stream_ready():
            describe_stream_response = kinesis_client.describe_stream(StreamName=stream_name)
            return describe_stream_response["StreamDescription"]["StreamStatus"] in [
                "ACTIVE",
                "UPDATING",
            ]

        poll_condition(is_stream_ready)
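Both wait helpers above follow the same pattern: create the resource, then block until the service reports it usable. A hypothetical call site for the stream variant:

stream_name = f"test-stream-{short_uid()}"
kinesis_client.create_stream(StreamName=stream_name, ShardCount=1)
_wait_for_stream_ready(stream_name)  # blocks until StreamStatus is ACTIVE or UPDATING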
Example #5
    def test_start_and_stop(self):
        proxy_port = get_free_tcp_port()
        backend_port = get_free_tcp_port()

        server = start_proxy_for_service(
            "myservice",
            proxy_port,
            backend_port,
            update_listener=None,
            quiet=True,
            params={"protocol_version": "HTTP/1.0"},
        )

        self.assertIsNotNone(server)

        try:
            self.assertTrue(
                poll_condition(lambda: is_port_open(proxy_port), timeout=15),
                "gave up waiting for port %d" % proxy_port,
            )
        finally:
            print("stopping proxy server")
            server.stop()

        print("waiting max 15 seconds for server to terminate")
        server.join(timeout=15)

        self.assertFalse(is_port_open(proxy_port))
Example #6
    def test_route_through_edge(self):
        cluster_id = f"domain-{short_uid()}"
        cluster_url = f"http://localhost:{config.EDGE_PORT}/{cluster_id}"
        cluster = EdgeProxiedOpensearchCluster(cluster_url)

        try:
            cluster.start()
            assert cluster.wait_is_up(240), "gave up waiting for server"

            response = requests.get(cluster_url)
            assert response.ok, f"cluster endpoint returned an error: {response.text}"
            assert response.json()["version"]["number"] == "1.1.0"

            response = requests.get(f"{cluster_url}/_cluster/health")
            assert response.ok, f"cluster health endpoint returned an error: {response.text}"
            assert response.json()["status"] in [
                "red",
                "orange",
                "yellow",
                "green",
            ], "expected cluster state to be in a valid state"

        finally:
            cluster.shutdown()

        assert poll_condition(
            lambda: not cluster.is_up(), timeout=240
        ), "gave up waiting for cluster to shut down"
Example #7
def wait_container_is_ready(timeout: Optional[float] = None):
    """Blocks until the localstack main container is running and the ready marker has been printed."""
    container_name = config.MAIN_CONTAINER_NAME

    def is_container_running():
        return DOCKER_CLIENT.is_container_running(container_name)

    if not poll_condition(is_container_running, timeout=timeout):
        return False

    logfile = LocalstackContainer(container_name).logfile

    ready = threading.Event()

    def set_ready_if_marker_found(_line: str):
        if _line == constants.READY_MARKER_OUTPUT:
            ready.set()

    # start a tail on the logfile
    listener = FileListener(logfile, set_ready_if_marker_found)
    listener.start()

    try:
        # but also check the existing log in case the container has been running longer
        with open(logfile, "r") as fd:
            for line in fd:
                if constants.READY_MARKER_OUTPUT == line.strip():
                    return True

        # TODO: calculate remaining timeout
        return ready.wait(timeout)
    finally:
        listener.close()
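The TODO above (carry the remaining timeout over to ready.wait) could be addressed with a small deadline helper; a sketch, with names of my own choosing:

import time
from typing import Optional


def remaining_time(deadline: Optional[float]) -> Optional[float]:
    """Seconds left until `deadline` (a time.time() value), clamped at zero.

    None means 'no deadline', which matches threading.Event.wait(None).
    """
    if deadline is None:
        return None
    return max(0.0, deadline - time.time())

# wait_container_is_ready would compute deadline = time.time() + timeout up
# front and end with `return ready.wait(remaining_time(deadline))`.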
Example #8
    def test_callback_exception_ignored(self, tail_engine, tmp_path):
        lines = []

        def callback(line):
            if "throw" in line:
                raise ValueError("oh noes")

            lines.append(line)

        file = tmp_path / "log.txt"
        file.touch()
        fd = open(file, "a")
        listener = FileListener(str(file), callback)
        listener.use_tail_command = tail_engine != "tailer"

        try:
            listener.start()
            assert listener.started.is_set()
            fd.write("hello" + os.linesep)
            fd.flush()
            fd.write("throw" + os.linesep)
            fd.write("pytest" + os.linesep)
            fd.flush()

            assert poll_condition(
                lambda: len(lines) == 2,
                timeout=3), ("expected two lines to appear. %s" % lines)

            assert lines[0] == "hello"
            assert lines[1] == "pytest"
        finally:
            fd.close()
            listener.close()
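The tail_engine fixture that parametrizes these FileListener tests is not part of the listing; a plausible sketch, with the parameter values guessed from the `tail_engine != "tailer"` check above:

import pytest


@pytest.fixture(params=["tailer", "tail_command"])
def tail_engine(request):
    # "tailer" exercises the pure-Python tailer library; any other value
    # makes the listener shell out to the `tail` command instead
    return request.param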
Example #9
    def test_basic_usage(self, tail_engine, tmp_path):
        lines = []

        file = tmp_path / "log.txt"
        file.touch()
        fd = open(file, "a")
        listener = FileListener(str(file), lines.append)
        listener.use_tail_command = tail_engine != "tailer"

        try:
            listener.start()
            assert listener.started.is_set()
            fd.write("hello" + os.linesep)
            fd.write("pytest" + os.linesep)
            fd.flush()

            assert poll_condition(
                lambda: len(lines) == 2,
                timeout=3), ("expected two lines to appear. %s" % lines)

            assert lines[0] == "hello"
            assert lines[1] == "pytest"
        finally:
            listener.close()

        try:
            fd.write("foobar" + os.linesep)
            time.sleep(0.5)
            assert len(
                lines
            ) == 2, "expected listener.stop() to stop listening on new "
        finally:
            fd.close()
Example #10
    def _wait_for_cluster(domain_name: str):
        def finished_processing():
            status = opensearch_client.describe_domain(
                DomainName=domain_name)["DomainStatus"]
            return status["Processing"] is False

        assert poll_condition(
            finished_processing, timeout=5 * 60
        ), f"could not start domain: {domain_name}"
Example #11
    def require(self, name: str) -> Service:
        """
        High level function that always returns a running service, or raises an error. If the service is in a state
        that it could be transitioned into a running state, then invoking this function will attempt that transition,
        e.g., by starting the service if it is available.
        """
        container = self.get_service_container(name)

        if not container:
            raise ValueError("no such service %s" % name)

        if container.state == ServiceState.STARTING:
            if not poll_condition(
                    lambda: container.state != ServiceState.STARTING,
                    timeout=30):
                raise TimeoutError("gave up waiting for service %s to start" %
                                   name)

        if container.state == ServiceState.STOPPING:
            if not poll_condition(
                    lambda: container.state == ServiceState.STOPPED,
                    timeout=30):
                raise TimeoutError("gave up waiting for service %s to stop" %
                                   name)

        with container.lock:
            if container.state == ServiceState.DISABLED:
                raise ServiceDisabled("service %s is disabled" % name)

            if container.state == ServiceState.RUNNING:
                return container.service

            if container.state == ServiceState.ERROR:
                # raise any capture error
                raise container.errors[-1]

            if container.state in [ServiceState.AVAILABLE, ServiceState.STOPPED]:
                if container.start():
                    return container.service
                else:
                    raise container.errors[-1]

        raise ServiceStateException(
            "service %s is not ready (%s) and could not be started" %
            (name, container.state))
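A caller of require typically distinguishes "disabled by configuration" from genuine startup failures; a hypothetical call site (manager stands in for whatever object exposes require):

try:
    service = manager.require("s3")
except ServiceDisabled:
    LOG.info("s3 is disabled by configuration, skipping")
except Exception as e:
    LOG.error("could not bring s3 into a running state: %s", e)
    raise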
Example #12
    def wait_is_up(self, timeout: float = None) -> bool:
        """
        Waits until the server is started and is_up returns true.

        :param timeout: the time in seconds to wait before returning false. If timeout is None, then wait indefinitely.
        :returns: true if the server is up, false if not or the timeout was reached while waiting.
        """
        # first wait until the started event was called
        self._started.wait(timeout=timeout)
        # then poll the health check
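        # note: the timeout is applied to both waits, so in the worst case
        # this method blocks for up to 2 * timeout seconds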
        return poll_condition(self.is_up, timeout=timeout)
Example #13
def container_client():
    client = docker_utils.SdkDockerClient()

    yield client

    try:
        client.stop_container(config.MAIN_CONTAINER_NAME)
    except Exception:
        pass

    # wait until container has been removed
    assert poll_condition(
        lambda: not container_exists(client, config.MAIN_CONTAINER_NAME),
        timeout=20)
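container_exists is not shown in the snippet; a plausible sketch against the same docker client object (treating any inspect error as "gone" is an assumption):

def container_exists(client, container_name: str) -> bool:
    try:
        client.inspect_container(container_name)
        return True
    except Exception:
        return False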
Example #14
    def test_create_and_delete_log_group(self, logs_client):
        test_name = f"test-log-group-{short_uid()}"

        def count_log_groups():
            # re-query on every call so that poll_condition sees fresh state;
            # polling a snapshot taken once would never observe a change
            return len(
                logs_client.describe_log_groups(
                    logGroupNamePrefix="test-log-group-").get("logGroups", []))

        count_before = count_log_groups()

        logs_client.create_log_group(logGroupName=test_name)

        assert poll_condition(
            lambda: count_log_groups() == count_before + 1,
            timeout=5.0,
            interval=0.5)

        logs_client.delete_log_group(logGroupName=test_name)

        assert poll_condition(
            lambda: count_log_groups() == count_before,
            timeout=5.0,
            interval=0.5)
Example #15
def _run_cluster_startup_monitor(cluster):
    region = ElasticsearchServiceBackend.get()
    LOG.debug("running cluster startup monitor for cluster %s", cluster)
    # wait until the cluster is started, or the timeout is reached
    status = poll_condition(cluster.is_up, timeout=CLUSTER_STARTUP_TIMEOUT, interval=5)

    LOG.debug("cluster state polling returned! status = %s", status)

    with _domain_mutex:
        LOG.debug("iterating over cluster domains %s", region.es_clusters.keys())
        for domain, domain_cluster in region.es_clusters.items():
            LOG.debug("checking cluster for domain %s", domain)
            if cluster is domain_cluster:
                if domain in region.es_domains:
                    region.es_domains[domain]["Created"] = status
Example #16
def proxy_server(proxy_listener, host="127.0.0.1", port=None) -> str:
    """
    Create a temporary proxy server on a random port (or the specified port) with the given proxy listener
    for the duration of the context manager.
    """
    from localstack.services.generic_proxy import start_proxy_server

    port = port or get_free_tcp_port()
    thread = start_proxy_server(port, bind_address=host, update_listener=proxy_listener)
    url = f"http://{host}:{port}"
    assert poll_condition(
        lambda: is_port_open(port), timeout=5
    ), f"server on port {port} did not start"
    yield url
    thread.stop()
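Since the function yields exactly once, it reads like the body of a contextlib.contextmanager function whose decorator was dropped from the listing; a hypothetical way to use it under that assumption:

import requests
from contextlib import contextmanager

with contextmanager(proxy_server)(proxy_listener=None) as url:
    # the proxy serves on a random free port and is stopped on exit
    requests.get(url)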
Example #17
def _run_cluster_startup_monitor(cluster):
    LOG.debug("running cluster startup monitor for cluster %s", cluster)
    # wait until the cluster is started, or the timeout is reached
    status = poll_condition(cluster.is_up,
                            timeout=CLUSTER_STARTUP_TIMEOUT,
                            interval=5)

    LOG.debug("cluster state polling returned! status = %s", status)

    with _domain_mutex:
        LOG.debug("iterating over cluster domains %s", ES_CLUSTERS.keys())
        for domain, domain_cluster in ES_CLUSTERS.items():
            LOG.debug("checking cluster for domain %s", domain)
            if cluster is domain_cluster:
                if domain in ES_DOMAINS:
                    ES_DOMAINS[domain]["Created"] = status
Example #18
    def test_multiplexing_cluster(self, monkeypatch):
        monkeypatch.setattr(config, "ES_ENDPOINT_STRATEGY", "domain")
        monkeypatch.setattr(config, "ES_MULTI_CLUSTER", False)

        manager = MultiplexingClusterManager()

        # create two elasticsearch domains
        domain0_name = f"domain-{short_uid()}"
        domain1_name = f"domain-{short_uid()}"
        domain0_arn = get_domain_arn(domain0_name, "us-east-1",
                                     TEST_AWS_ACCOUNT_ID)
        domain1_arn = get_domain_arn(domain1_name, "us-east-1",
                                     TEST_AWS_ACCOUNT_ID)
        cluster0 = manager.create(domain0_arn, dict(DomainName=domain0_name))
        cluster1 = manager.create(domain1_arn, dict(DomainName=domain1_name))

        try:
            # spawn the two clusters
            assert cluster0.wait_is_up(240)
            assert cluster1.wait_is_up(240)

            retry(lambda: try_cluster_health(cluster0.url),
                  retries=12,
                  sleep=10)
            retry(lambda: try_cluster_health(cluster1.url),
                  retries=12,
                  sleep=10)

            # create an index in cluster0, wait for it to appear, and make sure it
            # also shows up in cluster1 (both domains share one multiplexed cluster)
            index0_url = cluster0.url + "/my-index?pretty"
            index1_url = cluster1.url + "/my-index?pretty"

            response = requests.put(index0_url)
            assert response.ok, "failed to put index into cluster %s: %s" % (
                cluster0.url,
                response.text,
            )
            assert poll_condition(lambda: requests.head(index0_url).ok,
                                  timeout=10), "gave up waiting for index"

            assert requests.head(
                index1_url).ok, "expected index to appear by multiplexing"

        finally:
            call_safe(cluster0.shutdown)
            call_safe(cluster1.shutdown)
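These cluster tests mix retry, which re-runs a callable until it stops raising, with poll_condition, which re-evaluates a boolean. A minimal sketch of a retry helper matching the retries/sleep keywords used above, assuming those semantics:

import time


def retry(function, retries=3, sleep=1.0, **kwargs):
    """Call function(**kwargs) until it returns without raising, at most `retries` times."""
    for attempt in range(retries):
        try:
            return function(**kwargs)
        except Exception:
            if attempt == retries - 1:
                raise
            time.sleep(sleep)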
Example #19
    def test_multiplexing_cluster(self, monkeypatch):
        monkeypatch.setattr(config, "OPENSEARCH_ENDPOINT_STRATEGY", "domain")
        monkeypatch.setattr(config, "OPENSEARCH_MULTI_CLUSTER", False)

        manager = MultiplexingClusterManager()

        # create two opensearch domains
        domain_key_0 = DomainKey(domain_name=f"domain-{short_uid()}",
                                 region="us-east-1",
                                 account=TEST_AWS_ACCOUNT_ID)
        domain_key_1 = DomainKey(domain_name=f"domain-{short_uid()}",
                                 region="us-east-1",
                                 account=TEST_AWS_ACCOUNT_ID)
        cluster_0 = manager.create(domain_key_0.arn,
                                   OPENSEARCH_DEFAULT_VERSION)
        cluster_1 = manager.create(domain_key_1.arn,
                                   OPENSEARCH_DEFAULT_VERSION)

        try:
            # spawn the two clusters
            assert cluster_0.wait_is_up(240)
            assert cluster_1.wait_is_up(240)

            retry(lambda: try_cluster_health(cluster_0.url),
                  retries=12,
                  sleep=10)
            retry(lambda: try_cluster_health(cluster_1.url),
                  retries=12,
                  sleep=10)

            # create an index in cluster_0, wait for it to appear, make sure it's in cluster_1, too
            index_url_0 = cluster_0.url + "/my-index?pretty"
            index_url_1 = cluster_1.url + "/my-index?pretty"

            response = requests.put(index_url_0)
            assert response.ok, f"failed to put index into cluster {cluster_0.url}: {response.text}"
            assert poll_condition(lambda: requests.head(index_url_0).ok,
                                  timeout=10), "gave up waiting for index"

            assert requests.head(
                index_url_1).ok, "index should appear in second cluster"

        finally:
            call_safe(cluster_0.shutdown)
            call_safe(cluster_1.shutdown)
Example #20
    def test_run_and_stop_server(self):
        port = get_free_tcp_port()
        host = "127.0.0.1"

        LOG.info("%.2f starting server on port %d", time.time(), port)
        thread = run_server(port=port, bind_address=host, asynchronous=True)
        try:
            url = f"http://{host}:{port}"
            assert poll_condition(
                lambda: is_port_open(url, http_path="/"),
                timeout=15), f"gave up waiting for port {port}"
        finally:
            LOG.info("%.2f stopping server on port %d", time.time(), port)
            thread.stop()

        LOG.info("%.2f waiting on server to shut down", time.time())
        thread.join(timeout=15)
        assert not is_port_open(port), "port is still open after stop"
        LOG.info("%.2f port stopped %d", time.time(), port)
Example #21
    def start_runtime_components():
        from localstack.services.edge import start_edge
        from localstack.services.internal import LocalstackResourceHandler, get_internal_apis

        # serve internal APIs through the generic proxy
        ProxyListener.DEFAULT_LISTENERS.append(LocalstackResourceHandler(get_internal_apis()))

        # TODO: we want a composable LocalStack runtime (edge proxy, service manager, dns, ...)
        t = start_thread(start_edge, quiet=False)

        # TODO: properly encapsulate starting/stopping of edge server in a class
        if not poll_condition(
            lambda: is_port_open(config.get_edge_port_http()), timeout=5, interval=0.1
        ):
            raise TimeoutError(
                f"gave up waiting for edge server on {config.EDGE_BIND_HOST}:{config.EDGE_PORT}"
            )

        return t
Example #22
    def test_run_and_stop_server_from_different_threads(self):
        port = get_free_tcp_port()
        host = "127.0.0.1"

        LOG.info("%.2f starting server on port %d", time.time(), port)
        thread = run_server(port=port, bind_address=host, asynchronous=True)

        try:
            url = f"http://{host}:{port}"
            self.assertTrue(
                poll_condition(lambda: is_port_open(url, http_path="/"),
                               timeout=15),
                "gave up waiting for port %d " % port,
            )
        finally:
            LOG.info("%.2f stopping server on port %d", time.time(), port)
            threading.Thread(target=thread.stop).start()

        LOG.info("%.2f waiting on server to shut down", time.time())
        thread.join(timeout=15)
        self.assertFalse(is_port_open(port), "port is still open after stop")
        LOG.info("%.2f port stopped %d", time.time(), port)
Example #23
    def test_start_and_stop(self, monkeypatch):
        monkeypatch.setattr(config, "FORWARD_EDGE_INMEM", False)
        proxy_port = get_free_tcp_port()
        backend_port = get_free_tcp_port()

        server = start_proxy_for_service(
            "myservice",
            proxy_port,
            backend_port,
            update_listener=None,
            quiet=True,
        )

        assert server

        try:
            assert poll_condition(lambda: is_port_open(proxy_port), timeout=15)
        finally:
            server.stop()
            server.join(timeout=15)

        assert not is_port_open(proxy_port)
Example #24
def http_server(handler, host="127.0.0.1", port=None) -> str:
    """
    Create a temporary http server on a random port (or the specified port) with the given handler
    for the duration of the context manager.

    Example usage:

        def handler(request, data):
            print(request.method, request.path, data)

        with testutil.http_server(handler) as url:
            requests.post(url, json={"message": "hello"})
    """
    from localstack.utils.server.http2_server import run_server

    port = port or get_free_tcp_port()
    thread = run_server(port, host, handler=handler, asynchronous=True)
    url = f"http://{host}:{port}"
    assert poll_condition(lambda: is_port_open(port),
                          timeout=5), f"server on port {port} did not start"
    yield url
    thread.stop()
Example #25
    def test_domain_creation(self):
        es_client = aws_stack.connect_to_service("es")

        # make sure we cannot re-create same domain name
        self.assertRaises(
            ClientError,
            es_client.create_elasticsearch_domain,
            DomainName=self.domain_name,
        )

        # get domain status
        status = es_client.describe_elasticsearch_domain(
            DomainName=self.domain_name)
        self.assertEqual(self.domain_name,
                         status["DomainStatus"]["DomainName"])
        self.assertTrue(status["DomainStatus"]["Created"])
        self.assertFalse(status["DomainStatus"]["Deleted"])

        # wait for the domain to finish processing; re-fetch the status on
        # every poll, otherwise a stale snapshot is evaluated repeatedly
        def done_processing():
            domain_status = es_client.describe_elasticsearch_domain(
                DomainName=self.domain_name)["DomainStatus"]
            return domain_status.get("Processing") is False

        self.assertTrue(poll_condition(done_processing, timeout=30))
        self.assertEqual(
            "localhost:%s" % config.PORT_ELASTICSEARCH,
            status["DomainStatus"]["Endpoint"],
        )
        self.assertTrue(status["DomainStatus"]["EBSOptions"]["EBSEnabled"])

        # make sure we can fake adding tags to a domain
        response = es_client.add_tags(ARN="string",
                                      TagList=[{
                                          "Key": "SOME_TAG",
                                          "Value": "SOME_VALUE"
                                      }])
        self.assertEqual(200, response["ResponseMetadata"]["HTTPStatusCode"])
Example #26
    def test_kinesis_firehose_opensearch_s3_backup(
        self,
        firehose_client,
        kinesis_client,
        opensearch_client,
        s3_client,
        s3_bucket,
        kinesis_create_stream,
        monkeypatch,
        opensearch_endpoint_strategy,
    ):
        domain_name = f"test-domain-{short_uid()}"
        stream_name = f"test-stream-{short_uid()}"
        role_arn = "arn:aws:iam::000000000000:role/Firehose-Role"
        delivery_stream_name = f"test-delivery-stream-{short_uid()}"
        monkeypatch.setattr(config, "OPENSEARCH_ENDPOINT_STRATEGY",
                            opensearch_endpoint_strategy)
        try:
            opensearch_create_response = opensearch_client.create_domain(
                DomainName=domain_name)
            opensearch_url = f"http://{opensearch_create_response['DomainStatus']['Endpoint']}"
            opensearch_arn = opensearch_create_response["DomainStatus"]["ARN"]

            # create s3 backup bucket arn
            bucket_arn = aws_stack.s3_bucket_arn(s3_bucket)

            # create kinesis stream
            kinesis_create_stream(StreamName=stream_name, ShardCount=2)
            stream_arn = kinesis_client.describe_stream(
                StreamName=stream_name)["StreamDescription"]["StreamARN"]

            kinesis_stream_source_def = {
                "KinesisStreamARN": stream_arn,
                "RoleARN": role_arn,
            }
            opensearch_destination_configuration = {
                "RoleARN": role_arn,
                "DomainARN": opensearch_arn,
                "IndexName": "activity",
                "TypeName": "activity",
                "S3BackupMode": "AllDocuments",
                "S3Configuration": {
                    "RoleARN": role_arn,
                    "BucketARN": bucket_arn,
                },
            }
            firehose_client.create_delivery_stream(
                DeliveryStreamName=delivery_stream_name,
                DeliveryStreamType="KinesisStreamAsSource",
                KinesisStreamSourceConfiguration=kinesis_stream_source_def,
                AmazonopensearchserviceDestinationConfiguration=
                opensearch_destination_configuration,
            )

            # wait for opensearch cluster to be ready
            def check_domain_state():
                result = opensearch_client.describe_domain(
                    DomainName=domain_name)["DomainStatus"]["Processing"]
                return not result

            assert poll_condition(check_domain_state, 30, 1)

            # put kinesis stream record
            kinesis_record = {"target": "hello"}
            kinesis_client.put_record(StreamName=stream_name,
                                      Data=to_bytes(
                                          json.dumps(kinesis_record)),
                                      PartitionKey="1")

            firehose_record = {"target": "world"}
            firehose_client.put_record(
                DeliveryStreamName=delivery_stream_name,
                Record={"Data": to_bytes(json.dumps(firehose_record))},
            )

            def assert_opensearch_contents():
                response = requests.get(f"{opensearch_url}/activity/_search")
                response_body = response.json()
                assert "hits" in response_body
                assert "hits" in response_body["hits"]
                hits = response_body["hits"]["hits"]
                assert len(hits) == 2
                sources = [item["_source"] for item in hits]
                assert firehose_record in sources
                assert kinesis_record in sources

            retry(assert_opensearch_contents)

            def assert_s3_contents():
                result = s3_client.list_objects(Bucket=s3_bucket)
                contents = []
                for o in result.get("Contents"):
                    data = s3_client.get_object(Bucket=s3_bucket,
                                                Key=o.get("Key"))
                    content = data["Body"].read()
                    contents.append(content)
                assert len(contents) == 2
                assert to_bytes(json.dumps(firehose_record)) in contents
                assert to_bytes(json.dumps(kinesis_record)) in contents

            retry(assert_s3_contents)

        finally:
            firehose_client.delete_delivery_stream(
                DeliveryStreamName=delivery_stream_name)
            opensearch_client.delete_domain(DomainName=domain_name)
Example #27
    def wait_is_container_running(self, timeout=None) -> bool:
        return poll_condition(self.is_container_running, timeout)