def test_create_and_then_remove_supported_resource_change_set(deploy_cfn_template, s3_client):
    first_bucket_name = f"test-bucket-1-{short_uid()}"
    second_bucket_name = f"test-bucket-2-{short_uid()}"

    stack = deploy_cfn_template(
        template=load_template_raw("for_removal_setup.yaml"),
        template_mapping={
            "first_bucket_name": first_bucket_name,
            "second_bucket_name": second_bucket_name,
        },
    )

    available_buckets = s3_client.list_buckets()
    bucket_names = [bucket["Name"] for bucket in available_buckets["Buckets"]]
    assert first_bucket_name in bucket_names
    assert second_bucket_name in bucket_names

    deploy_cfn_template(
        is_update=True,
        template=load_template_raw("for_removal_remove.yaml"),
        template_mapping={"first_bucket_name": first_bucket_name},
        stack_name=stack.stack_name,
    )

    def assert_bucket_gone():
        available_buckets = s3_client.list_buckets()
        bucket_names = [bucket["Name"] for bucket in available_buckets["Buckets"]]
        return first_bucket_name in bucket_names and second_bucket_name not in bucket_names

    assert poll_condition(condition=assert_bucket_gone, timeout=20, interval=5)
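
Every example on this page funnels through `poll_condition`. As a point of reference, here is a minimal sketch of its contract, assuming a LocalStack-style signature `poll_condition(condition, timeout=None, interval=0.5)`; the names and defaults are assumptions, not the library source:

import time
from typing import Callable, Optional

def poll_condition(condition: Callable[[], bool],
                   timeout: Optional[float] = None,
                   interval: float = 0.5) -> bool:
    # Call `condition` every `interval` seconds until it returns a truthy
    # value (-> True) or `timeout` seconds elapse (-> False).
    # A timeout of None polls indefinitely.
    start = time.time()
    while not condition():
        if timeout is not None and time.time() - start >= timeout:
            return False
        time.sleep(interval)
    return True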
Example #2
def wait_container_is_ready(timeout: Optional[float] = None):
    """Blocks until the localstack main container is running and the ready marker has been printed."""
    container_name = config.MAIN_CONTAINER_NAME

    def is_container_running():
        return DOCKER_CLIENT.is_container_running(container_name)

    if not poll_condition(is_container_running, timeout=timeout):
        return False

    logfile = LocalstackContainer(container_name).logfile

    ready = threading.Event()

    def set_ready_if_marker_found(_line: str):
        # strip to be robust against trailing newlines from the log tail
        if _line.strip() == constants.READY_MARKER_OUTPUT:
            ready.set()

    # start a tail on the logfile
    listener = FileListener(logfile, set_ready_if_marker_found)
    listener.start()

    try:
        # but also check the existing log in case the container has been running longer
        with open(logfile, "r") as fd:
            for line in fd:
                if constants.READY_MARKER_OUTPUT == line.strip():
                    return True

        # TODO: calculate remaining timeout
        return ready.wait(timeout)
    finally:
        listener.close()
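
The TODO above is worth taking seriously: `timeout` is spent twice, once waiting for the container and once waiting for the ready marker, so the total wait can exceed the caller's budget (Example #11 below addresses the same problem with a timer). A small helper along these lines would fix it; `remaining_timeout` is a name introduced here, not part of the codebase:

import time
from typing import Optional

def remaining_timeout(started: float, timeout: Optional[float]) -> Optional[float]:
    # Return the portion of `timeout` left since `started`,
    # or None to mean "wait indefinitely".
    if timeout is None:
        return None
    return max(0.0, timeout - (time.time() - started))

The function would then record `started = time.time()` on entry and end with `return ready.wait(remaining_timeout(started, timeout))`; `Event.wait(0.0)` returns immediately, which is the right behavior when the budget is already exhausted.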
Example #3
    def require(self, name: str) -> Service:
        """
        High level function that always returns a running service, or raises an error. If the service is in a state
        from which it can be transitioned into a running state, invoking this function will attempt that transition,
        e.g., by starting the service if it is available.
        """
        container = self.get_service_container(name)

        if not container:
            raise ValueError("no such service %s" % name)

        if container.state == ServiceState.STARTING:
            if not poll_condition(
                    lambda: container.state != ServiceState.STARTING,
                    timeout=30):
                raise TimeoutError("gave up waiting for service %s to start" %
                                   name)

        if container.state == ServiceState.STOPPING:
            if not poll_condition(
                    lambda: container.state == ServiceState.STOPPED,
                    timeout=30):
                raise TimeoutError("gave up waiting for service %s to stop" %
                                   name)

        with container.lock:
            if container.state == ServiceState.DISABLED:
                raise ServiceDisabled("service %s is disabled" % name)

            if container.state == ServiceState.RUNNING:
                return container.service

            if container.state == ServiceState.ERROR:
                # raise any captured error
                raise container.errors[-1]

            if container.state in (ServiceState.AVAILABLE, ServiceState.STOPPED):
                if container.start():
                    return container.service
                else:
                    raise container.errors[-1]

        raise ServiceStateException(
            "service %s is not ready (%s) and could not be started" %
            (name, container.state))
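
A hedged usage sketch of `require` (the `manager` argument and the "sqs" service key are illustrative assumptions):

def get_running_sqs(manager) -> Service:
    # require() blocks through STARTING/STOPPING transitions and starts
    # AVAILABLE/STOPPED services, so the returned service is always running.
    try:
        return manager.require("sqs")
    except ServiceDisabled:
        raise RuntimeError("sqs is disabled in this environment")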
Example #4
    def wait_is_up(self, timeout: float = None) -> bool:
        """
        Waits until the server is started and is_up returns true.

        :param timeout: the time in seconds to wait before returning false. If timeout is None, then wait indefinitely.
        :returns: true if the server is up, false if not or the timeout was reached while waiting.
        """
        # first wait until the started event was called
        self._started.wait(timeout=timeout)
        # then poll the health check
        return poll_condition(self.is_up, timeout=timeout)
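
Note that `timeout` bounds each of the two waits separately, so the method can block for up to twice the requested time, the same pattern the TODO in Example #2 points at. A hedged variant with a single budget, reusing the `remaining_timeout` helper sketched there and also honoring the return value of `Event.wait`:

    def wait_is_up(self, timeout: float = None) -> bool:
        started = time.time()
        # first wait until the started event was set; Event.wait returns False on timeout
        if not self._started.wait(timeout=timeout):
            return False
        # then poll the health check with whatever time is left
        return poll_condition(self.is_up, timeout=remaining_timeout(started, timeout))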
Example #5
    def test_single_scheduled_run(self, dispatcher):
        scheduler, thread = self.create_and_start(dispatcher)

        task = DummyTask()
        invocation_time = time.time() + 0.2

        scheduler.schedule(task, start=invocation_time)

        assert poll_condition(lambda: len(task.invocations) >= 1, timeout=5)

        scheduler.close()
        thread.join(5)

        assert len(task.invocations) == 1
        assert task.invocations[0][0] == 1

        assert task.invocations[0][1] == pytest.approx(invocation_time, 0.1)
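
`DummyTask` is defined elsewhere in the test suite. From how it is used here and in Example #6 (`invocations[0][0] == 1`, `invocations[0][1]` compared against a wall-clock time, and a separate `completions` list), a plausible minimal sketch, an assumption rather than the suite's actual class, is:

import time

class DummyTask:
    # records each run as (invocation_count, wall_clock_time)
    def __init__(self, fn=None):
        self.fn = fn
        self.invocations = []
        self.completions = []

    def __call__(self, *args):
        self.invocations.append((len(self.invocations) + 1, time.time()))
        if self.fn:
            self.fn()
        self.completions.append(time.time())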
Example #6
    def test_periodic_run_fixed_with_longer_task(self):
        task = DummyTask(fn=lambda: time.sleep(1))

        scheduler, thread = self.create_and_start(None)

        scheduler.schedule(task, period=0.5, fixed_rate=True)
        scheduler.schedule(scheduler.close, start=time.time() + 1.25)

        thread.join(5)

        assert len(task.invocations) == 3

        first = task.invocations[0][1]
        assert first + 0.5 == pytest.approx(task.invocations[1][1], 0.1)
        assert first + 1 == pytest.approx(task.invocations[2][1], 0.1)

        assert poll_condition(lambda: len(task.completions) >= 3, timeout=5)
Example #7
@contextmanager
def proxy_server(proxy_listener, host="127.0.0.1", port=None) -> str:
    """
    Create a temporary proxy server on a random port (or the specified port) with the given proxy listener
    for the duration of the context manager.
    """
    from localstack.services.generic_proxy import start_proxy_server

    port = port or get_free_tcp_port()
    thread = start_proxy_server(port,
                                bind_address=host,
                                update_listener=proxy_listener)
    url = f"http://{host}:{port}"
    assert poll_condition(lambda: is_port_open(port),
                          timeout=5), f"server on port {port} did not start"
    yield url
    thread.stop()
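
A hedged usage sketch, assuming LocalStack's `ProxyListener` convention that `forward_request` may return an int status code to short-circuit the request:

import requests

class EchoListener:
    def forward_request(self, method, path, data, headers):
        return 200  # answer every request with an empty 200

with proxy_server(EchoListener()) as url:
    assert requests.get(url).status_code == 200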
Example #8
def sqs_collect_s3_events(sqs_client: "SQSClient",
                          queue_url: str,
                          min_events: int,
                          timeout: int = 10) -> List[Dict]:
    """
    Polls the given queue for the given amount of time, and extracts and flattens all events from the
    received messages (messages that have a "Records" field in their body, where the records can be
    JSON-deserialized).

    :param sqs_client: the boto3 client to use
    :param queue_url: the queue URL to listen on
    :param min_events: the minimum number of events to wait for
    :param timeout: the number of seconds to wait before raising an assertion error
    :return: a list with the deserialized records from the SQS messages
    """

    events = []

    def collect_events() -> int:
        _response = sqs_client.receive_message(QueueUrl=queue_url,
                                               WaitTimeSeconds=timeout,
                                               MaxNumberOfMessages=1)
        messages = _response.get("Messages", [])
        if not messages:
            LOG.info("no messages received from %s after %d seconds",
                     queue_url, timeout)

        for m in messages:
            body = m["Body"]
            # see https://www.mikulskibartosz.name/what-is-s3-test-event/
            if "s3:TestEvent" in body:
                continue

            assert "Records" in body, "Unexpected event received"

            doc = json.loads(body)
            events.extend(doc["Records"])

        return len(events)

    assert poll_condition(lambda: collect_events() >= min_events,
                          timeout=timeout)

    return events
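
A hedged usage sketch with boto3 against a local endpoint (the endpoint URL is illustrative, and `queue_url` must point to a queue that the bucket is already configured to notify):

import boto3

sqs = boto3.client("sqs", endpoint_url="http://localhost:4566")
events = sqs_collect_s3_events(sqs, queue_url, min_events=2)
assert all(event["eventSource"] == "aws:s3" for event in events)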
Example #9
    def start_runtime_components():
        from localstack.services.edge import start_edge

        # TODO: we want a composable LocalStack runtime (edge proxy, service manager, dns, ...)
        t = start_thread(start_edge, quiet=False)

        # TODO: properly encapsulate starting/stopping of edge server in a class
        if not poll_condition(
                lambda: is_port_open(config.get_edge_port_http()),
                timeout=15,
                interval=0.3):
            if LOG.isEnabledFor(logging.DEBUG):
                # make another call with quiet=False to print detailed error logs
                is_port_open(config.get_edge_port_http(), quiet=False)
            raise TimeoutError(
                f"gave up waiting for edge server on {config.EDGE_BIND_HOST}:{config.EDGE_PORT}"
            )

        return t
Example #10
def sqs_collect_sns_messages(sqs_client: "SQSClient",
                             queue_url: str,
                             min_messages: int,
                             timeout: int = 10) -> List[Dict]:
    """
    Polls the given queue for the given amount of time and extracts from the received SQS messages all SNS messages (messages that have a "TopicArn" field in their body).

    :param sqs_client: the boto3 client to use
    :param queue_url: the queue URL connected to the topic
    :param min_messages: the minimum number of messages to wait for
    :param timeout: the number of seconds to wait before raising an assertion error
    :return: a list with the deserialized SNS messages
    """

    collected_messages = []

    def collect_events() -> int:
        _response = sqs_client.receive_message(QueueUrl=queue_url,
                                               WaitTimeSeconds=timeout,
                                               MaxNumberOfMessages=1)
        messages = _response.get("Messages", [])
        if not messages:
            LOG.info("no messages received from %s after %d seconds",
                     queue_url, timeout)

        for m in messages:
            body = m["Body"]
            # see https://www.mikulskibartosz.name/what-is-s3-test-event/
            if "s3:TestEvent" in body:
                continue

            doc = json.loads(body)
            assert "TopicArn" in doc, f"unexpected event in message {m}"
            collected_messages.append(doc)

        return len(collected_messages)

    assert poll_condition(lambda: collect_events() >= min_messages,
                          timeout=timeout)

    return collected_messages
Example #11
def wait_container_is_ready(timeout: Optional[float] = None):
    """Blocks until the localstack main container is running and the ready marker has been printed."""
    container_name = config.MAIN_CONTAINER_NAME
    started = time.time()

    def is_container_running():
        return DOCKER_CLIENT.is_container_running(container_name)

    if not poll_condition(is_container_running, timeout=timeout):
        return False

    stream = DOCKER_CLIENT.stream_container_logs(container_name)

    # create a timer that will terminate the log stream after the remaining timeout
    timer = None
    if timeout:
        waited = time.time() - started
        remaining = timeout - waited
        # check the rare case that the timeout has already been reached
        if remaining <= 0:
            stream.close()
            return False
        timer = threading.Timer(remaining, stream.close)
        timer.start()

    try:
        for line in stream:
            line = line.decode("utf-8").strip()
            if line == constants.READY_MARKER_OUTPUT:
                return True

        # EOF was reached or the stream was closed
        return False
    finally:
        call_safe(stream.close)
        if timer:
            # make sure the timer is stopped (does nothing if it has already run)
            timer.cancel()
Example #12
@contextmanager
def http_server(handler, host="127.0.0.1", port=None) -> str:
    """
    Create a temporary http server on a random port (or the specified port) with the given handler
    for the duration of the context manager.

    Example usage:

        def handler(request, data):
            print(request.method, request.path, data)

        with testutil.http_server(handler) as url:
            requests.post(url, json={"message": "hello"})
    """
    from localstack.utils.server.http2_server import run_server

    port = port or get_free_tcp_port()
    thread = run_server(port, [host], handler=handler, asynchronous=True)
    url = f"http://{host}:{port}"
    assert poll_condition(lambda: is_port_open(port),
                          timeout=5), f"server on port {port} did not start"
    yield url
    thread.stop()
Example #13
    def test_deletion_event_source_mapping_with_dynamodb(
            self, create_lambda_function, lambda_client, dynamodb_client,
            lambda_su_role):
        function_name = f"lambda_func-{short_uid()}"
        ddb_table = f"ddb_table-{short_uid()}"

        create_lambda_function(
            func_name=function_name,
            handler_file=TEST_LAMBDA_PYTHON_ECHO,
            runtime=LAMBDA_RUNTIME_PYTHON36,
            role=lambda_su_role,
        )

        latest_stream_arn = aws_stack.create_dynamodb_table(
            table_name=ddb_table,
            partition_key="id",
            client=dynamodb_client,
            stream_view_type="NEW_IMAGE",
        )["TableDescription"]["LatestStreamArn"]

        lambda_client.create_event_source_mapping(
            FunctionName=function_name,
            EventSourceArn=latest_stream_arn,
            StartingPosition="TRIM_HORIZON",
        )

        def wait_for_table_created():
            return (dynamodb_client.describe_table(
                TableName=ddb_table)["Table"]["TableStatus"] == "ACTIVE")

        assert poll_condition(wait_for_table_created, timeout=30)

        dynamodb_client.delete_table(TableName=ddb_table)

        result = lambda_client.list_event_source_mappings(
            EventSourceArn=latest_stream_arn)
        assert 1 == len(result["EventSourceMappings"])
Example #14
@pytest.fixture
def serve_asgi_app():
    _servers = []

    def _create(app: ASGI3Framework,
                config: Config = None,
                event_loop: AbstractEventLoop = None) -> HypercornServer:
        if not config:
            config = Config()
            config.bind = f"localhost:{net.get_free_tcp_port()}"

        srv = HypercornServer(app, config, loop=event_loop)
        _servers.append(srv)
        srv.start()
        assert srv.wait_is_up(
            timeout=10), "gave up waiting for server to start up"
        return srv

    yield _create

    for server in _servers:
        server.shutdown()
        assert poll_condition(
            lambda: not server.is_up(),
            timeout=10), "gave up waiting for server to shut down"
Example #15
    def test_event_source_mapping_default_batch_size(
        self,
        create_lambda_function,
        lambda_client,
        sqs_client,
        sqs_create_queue,
        sqs_queue_arn,
        dynamodb_client,
        dynamodb_create_table,
        lambda_su_role,
    ):
        function_name = f"lambda_func-{short_uid()}"
        queue_name_1 = f"queue-{short_uid()}-1"
        queue_name_2 = f"queue-{short_uid()}-2"
        ddb_table = f"ddb_table-{short_uid()}"

        create_lambda_function(
            func_name=function_name,
            handler_file=TEST_LAMBDA_PYTHON_ECHO,
            runtime=LAMBDA_RUNTIME_PYTHON36,
            role=lambda_su_role,
        )

        queue_url_1 = sqs_create_queue(QueueName=queue_name_1)
        queue_arn_1 = sqs_queue_arn(queue_url_1)

        rs = lambda_client.create_event_source_mapping(
            EventSourceArn=queue_arn_1, FunctionName=function_name)
        assert BATCH_SIZE_RANGES["sqs"][0] == rs["BatchSize"]
        uuid = rs["UUID"]

        def wait_for_event_source_mapping():
            return lambda_client.get_event_source_mapping(
                UUID=uuid)["State"] == "Enabled"

        assert poll_condition(wait_for_event_source_mapping, timeout=30)

        with pytest.raises(ClientError) as e:
            # Update batch size with invalid value
            lambda_client.update_event_source_mapping(
                UUID=uuid,
                FunctionName=function_name,
                BatchSize=BATCH_SIZE_RANGES["sqs"][1] + 1,
            )
        e.match(INVALID_PARAMETER_VALUE_EXCEPTION)

        queue_url_2 = sqs_create_queue(QueueName=queue_name_2)
        queue_arn_2 = sqs_queue_arn(queue_url_2)

        with pytest.raises(ClientError) as e:
            # Create event source mapping with invalid batch size value
            lambda_client.create_event_source_mapping(
                EventSourceArn=queue_arn_2,
                FunctionName=function_name,
                BatchSize=BATCH_SIZE_RANGES["sqs"][1] + 1,
            )
        e.match(INVALID_PARAMETER_VALUE_EXCEPTION)

        table_description = dynamodb_create_table(
            table_name=ddb_table,
            partition_key="id",
            stream_view_type="NEW_IMAGE",
        )["TableDescription"]

        # table ARNs are not sufficient as event source, needs to be a dynamodb stream arn
        if not is_old_provider():
            with pytest.raises(ClientError) as e:
                lambda_client.create_event_source_mapping(
                    EventSourceArn=table_description["TableArn"],
                    FunctionName=function_name,
                    StartingPosition="LATEST",
                )
            e.match(INVALID_PARAMETER_VALUE_EXCEPTION)

        # check if event source mapping can be created with latest stream ARN
        rs = lambda_client.create_event_source_mapping(
            EventSourceArn=table_description["LatestStreamArn"],
            FunctionName=function_name,
            StartingPosition="LATEST",
        )

        assert BATCH_SIZE_RANGES["dynamodb"][0] == rs["BatchSize"]
Example #16
    def test_disabled_event_source_mapping_with_dynamodb(
        self,
        create_lambda_function,
        lambda_client,
        dynamodb_resource,
        dynamodb_client,
        dynamodb_create_table,
        logs_client,
        dynamodbstreams_client,
        lambda_su_role,
    ):
        function_name = f"lambda_func-{short_uid()}"
        ddb_table = f"ddb_table-{short_uid()}"

        create_lambda_function(
            func_name=function_name,
            handler_file=TEST_LAMBDA_PYTHON_ECHO,
            runtime=LAMBDA_RUNTIME_PYTHON36,
            role=lambda_su_role,
        )

        latest_stream_arn = dynamodb_create_table(
            table_name=ddb_table,
            partition_key="id",
            stream_view_type="NEW_IMAGE"
        )["TableDescription"]["LatestStreamArn"]

        rs = lambda_client.create_event_source_mapping(
            FunctionName=function_name,
            EventSourceArn=latest_stream_arn,
            StartingPosition="TRIM_HORIZON",
            MaximumBatchingWindowInSeconds=1,
        )
        uuid = rs["UUID"]

        def wait_for_table_created():
            return (dynamodb_client.describe_table(
                TableName=ddb_table)["Table"]["TableStatus"] == "ACTIVE")

        assert poll_condition(wait_for_table_created, timeout=30)

        def wait_for_stream_created():
            return (dynamodbstreams_client.describe_stream(
                StreamArn=latest_stream_arn)["StreamDescription"]
                    ["StreamStatus"] == "ENABLED")

        assert poll_condition(wait_for_stream_created, timeout=30)

        table = dynamodb_resource.Table(ddb_table)

        items = [
            {"id": short_uid(), "data": "data1"},
            {"id": short_uid(), "data": "data2"},
        ]

        table.put_item(Item=items[0])

        def assert_events():
            events = get_lambda_log_events(function_name,
                                           logs_client=logs_client)

            # lambda was invoked 1 time
            assert 1 == len(events[0]["Records"])

        # might take some time against AWS
        retry(assert_events, sleep=3, retries=10)

        # disable event source mapping
        lambda_client.update_event_source_mapping(UUID=uuid, Enabled=False)

        table.put_item(Item=items[1])
        events = get_lambda_log_events(function_name, logs_client=logs_client)

        # lambda no longer invoked, still have 1 event
        assert 1 == len(events[0]["Records"])
Example #17
    def test_xray_header(
        self,
        s3_client,
        sqs_client,
        s3_create_bucket,
        sqs_create_queue,
        s3_create_sqs_bucket_notification,
        cleanups,
        snapshot,
    ):
        # test for https://github.com/localstack/localstack/issues/3686

        snapshot.add_transformer(snapshot.transform.sqs_api())
        snapshot.add_transformer(snapshot.transform.s3_api())
        snapshot.add_transformer(
            snapshot.transform.key_value("MD5OfBody",
                                         reference_replacement=False))

        # add boto hook
        def add_xray_header(request, **kwargs):
            request.headers["X-Amzn-Trace-Id"] = (
                "Root=1-3152b799-8954dae64eda91bc9a23a7e8;Parent=7fa8c0f79203be72;Sampled=1"
            )

        s3_client.meta.events.register("before-send.s3.*", add_xray_header)
        # make sure the hook gets cleaned up after the test
        cleanups.append(lambda: s3_client.meta.events.unregister(
            "before-send.s3.*", add_xray_header))

        key = "test-data"
        bucket_name = s3_create_bucket()
        queue_url = sqs_create_queue()

        s3_create_sqs_bucket_notification(bucket_name, queue_url,
                                          ["s3:ObjectCreated:*"])

        # put an object where the bucket_name is in the path
        s3_client.put_object(Bucket=bucket_name, Key=key, Body="something")

        messages = []

        def get_messages():
            resp = sqs_client.receive_message(
                QueueUrl=queue_url,
                AttributeNames=["AWSTraceHeader"],
                MessageAttributeNames=["All"],
                VisibilityTimeout=0,
            )
            for m in resp["Messages"]:
                if "s3:TestEvent" in m["Body"]:
                    continue
                messages.append(m)

            return len(messages)

        assert poll_condition(lambda: get_messages() >= 1, timeout=10)

        assert "AWSTraceHeader" in messages[0]["Attributes"]
        assert (
            messages[0]["Attributes"]["AWSTraceHeader"] ==
            "Root=1-3152b799-8954dae64eda91bc9a23a7e8;Parent=7fa8c0f79203be72;Sampled=1"
        )
        snapshot.match("receive_messages", {"messages": messages})
Example #18
    def test_disabled_dynamodb_event_source_mapping(
        self,
        create_lambda_function,
        lambda_client,
        dynamodb_resource,
        dynamodb_client,
        dynamodb_create_table,
        logs_client,
        dynamodbstreams_client,
        lambda_su_role,
    ):
        def is_stream_enabled():
            return (dynamodbstreams_client.describe_stream(
                StreamArn=latest_stream_arn)["StreamDescription"]
                    ["StreamStatus"] == "ENABLED")

        function_name = f"lambda_func-{short_uid()}"
        ddb_table = f"ddb_table-{short_uid()}"
        items = [
            {"id": short_uid(), "data": "data1"},
            {"id": short_uid(), "data": "data2"},
        ]

        try:
            create_lambda_function(
                func_name=function_name,
                handler_file=TEST_LAMBDA_PYTHON_ECHO,
                runtime=LAMBDA_RUNTIME_PYTHON36,
                role=lambda_su_role,
            )
            latest_stream_arn = dynamodb_create_table(
                table_name=ddb_table,
                partition_key="id",
                stream_view_type="NEW_IMAGE"
            )["TableDescription"]["LatestStreamArn"]
            rs = lambda_client.create_event_source_mapping(
                FunctionName=function_name,
                EventSourceArn=latest_stream_arn,
                StartingPosition="TRIM_HORIZON",
                MaximumBatchingWindowInSeconds=1,
            )
            uuid = rs["UUID"]
            _await_event_source_mapping_enabled(lambda_client, uuid)

            assert poll_condition(is_stream_enabled, timeout=30)
            table = dynamodb_resource.Table(ddb_table)

            table.put_item(Item=items[0])
            # Lambda should be invoked 1 time
            retry(
                check_expected_lambda_log_events_length,
                retries=10,
                sleep=3,
                function_name=function_name,
                expected_length=1,
                logs_client=logs_client,
            )
            # disable event source mapping
            lambda_client.update_event_source_mapping(UUID=uuid, Enabled=False)
            time.sleep(2)
            table.put_item(Item=items[1])
            # lambda no longer invoked, still have 1 event
            check_expected_lambda_log_events_length(
                expected_length=1,
                function_name=function_name,
                logs_client=logs_client)
        finally:
            lambda_client.delete_event_source_mapping(UUID=uuid)
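
`_await_event_source_mapping_enabled` is defined elsewhere in the suite; given the identical wait written out in Example #15, a plausible sketch (an assumption, not the suite's actual helper) is:

def _await_event_source_mapping_enabled(lambda_client, uuid, timeout=30):
    def is_enabled():
        return lambda_client.get_event_source_mapping(UUID=uuid)["State"] == "Enabled"

    assert poll_condition(is_enabled, timeout=timeout)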
Example #19
    def wait_is_container_running(self, timeout=None) -> bool:
        return poll_condition(self.is_container_running, timeout)
Example #20
    def restart_alarms(*args):
        poll_condition(lambda: SERVICE_PLUGINS.is_running("cloudwatch"))
        self.alarm_scheduler.restart_existing_alarms()
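
Note that `poll_condition` is called here without a `timeout`, so the function blocks indefinitely if cloudwatch never reaches the running state. A hedged bounded variant (the 30-second budget and the warning are assumptions):

    def restart_alarms(*args):
        # give cloudwatch a bounded window instead of waiting forever
        if not poll_condition(lambda: SERVICE_PLUGINS.is_running("cloudwatch"), timeout=30):
            LOG.warning("cloudwatch did not come up; skipping alarm restart")
            return
        self.alarm_scheduler.restart_existing_alarms()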