Example 1
    def __init__(self, region, name, tags):
        self.name = name
        self.region = region
        self.arn = "arn:aws:logs:{region}:1:log-group:{log_group}".format(
            region=region, log_group=name)
        self.creationTime = unix_time_millis()
        self.tags = tags
        self.streams = dict()  # {name: LogStream}
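Every snippet in this collection revolves around the same unix_time_millis() helper. As a point of reference, here is a minimal sketch of what such a helper plausibly looks like; the optional datetime argument is inferred from Example 7, which calls unix_time_millis(datetime.utcnow()), and this is an assumption rather than moto's exact source:

    from datetime import datetime

    EPOCH = datetime(1970, 1, 1)

    def unix_time_millis(dt=None):
        # Milliseconds since the Unix epoch; defaults to "now" in UTC.
        dt = dt or datetime.utcnow()
        return (dt - EPOCH).total_seconds() * 1000.0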
Example 2
    def __init__(self, region, name, tags):
        self.name = name
        self.region = region
        self.arn = "arn:aws:logs:{region}:1:log-group:{log_group}".format(
            region=region, log_group=name)
        self.creationTime = unix_time_millis()
        self.tags = tags
        self.streams = dict()  # {name: LogStream}
        self.retentionInDays = None  # AWS defaults to Never Expire for log group retention
Example 3
    def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token):
        # TODO: ensure sequence_token
        # TODO: to be thread safe this would need a lock
        self.lastIngestionTime = unix_time_millis()
        # TODO: make this match AWS if possible
        self.storedBytes += sum([len(log_event["message"]) for log_event in log_events])
        self.events += [LogEvent(self.lastIngestionTime, log_event) for log_event in log_events]
        self.uploadSequenceToken += 1

        return self.uploadSequenceToken
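A short, self-contained sketch of the token hand-off this method implies; FakeStream is a hypothetical stand-in, not moto's class:

    class FakeStream:
        # Minimal stand-in for the LogStream above, just enough to show
        # how each returned token is passed back in on the next call.
        def __init__(self):
            self.uploadSequenceToken = 0

        def put_log_events(self, group, stream, events, sequence_token):
            self.uploadSequenceToken += 1
            return self.uploadSequenceToken

    stream = FakeStream()
    token = None
    for batch in [[{"message": "a"}], [{"message": "b"}]]:
        token = stream.put_log_events("my-group", "my-stream", batch, token)
    assert token == 2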
Example 4
    def __init__(self, region, log_group, name):
        self.region = region
        self.arn = "arn:aws:logs:{region}:{id}:log-group:{log_group}:log-stream:{log_stream}".format(
            region=region, id=self.__class__._log_ids, log_group=log_group, log_stream=name)
        self.creationTime = unix_time_millis()
        self.firstEventTimestamp = None
        self.lastEventTimestamp = None
        self.lastIngestionTime = None
        self.logStreamName = name
        self.storedBytes = 0
        self.uploadSequenceToken = 0  # presumably the token handed back as sequenceToken by put_log_events
        self.events = []

        self.__class__._log_ids += 1
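The _log_ids class attribute acts as a counter shared by all instances, so every stream minted by the class bakes a distinct id into its ARN. The same pattern in isolation:

    class Counter:
        _next_id = 0  # shared across all instances, like LogStream._log_ids

        def __init__(self):
            self.id = self.__class__._next_id
            self.__class__._next_id += 1

    a, b = Counter(), Counter()
    assert (a.id, b.id) == (0, 1)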
Example 5
    def mark_received(self, visibility_timeout=None):
        """
        When a message is received we set the first receive timestamp,
        bump the ``approximate_receive_count``, and update the ``visible_at`` time.
        """
        if visibility_timeout:
            visibility_timeout = int(visibility_timeout)
        else:
            visibility_timeout = 0

        if not self.approximate_first_receive_timestamp:
            self.approximate_first_receive_timestamp = unix_time_millis()

        self.approximate_receive_count += 1

        # Make the message visible again in the future unless it's
        # destroyed.
        if visibility_timeout:
            self.change_visibility(visibility_timeout)

        self.receipt_handle = generate_receipt_handle()
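A sketch of the receive flow these SQS-style methods imply, wiring together the pieces from Examples 5, 13, and 15 into a runnable stub; the class below is a simplification, not moto's actual message model:

    import time

    class Msg:
        def __init__(self):
            self.visible_at = 0.0

        def change_visibility(self, visibility_timeout):
            # Same milliseconds arithmetic as Example 15.
            self.visible_at = time.time() * 1000 + int(visibility_timeout) * 1000

        def visible(self):
            # Same comparison as Example 13.
            return time.time() * 1000 > self.visible_at

    msg = Msg()
    msg.change_visibility(30)
    assert not msg.visible()  # hidden for roughly the next 30 seconds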
Example 6
    def delay(self, delay_seconds):
        delay_msec = int(delay_seconds) * 1000
        self.delayed_until = unix_time_millis() + delay_msec
Example 7
def test_put_subscription_filter_with_firehose():
    # given
    region_name = "us-east-1"
    client_firehose = boto3.client("firehose", region_name)
    client_logs = boto3.client("logs", region_name)

    log_group_name = "/firehose-test"
    log_stream_name = "delivery-stream"
    client_logs.create_log_group(logGroupName=log_group_name)
    client_logs.create_log_stream(logGroupName=log_group_name,
                                  logStreamName=log_stream_name)

    # Create an S3 bucket.
    bucket_name = "firehosetestbucket"
    s3_client = boto3.client("s3", region_name=region_name)
    s3_client.create_bucket(
        Bucket=bucket_name,
        CreateBucketConfiguration={"LocationConstraint": "us-west-1"},
    )

    # Create the Firehose delivery stream that uses that S3 bucket as
    # the destination.
    delivery_stream_name = "firehose_log_test"
    firehose_arn = client_firehose.create_delivery_stream(
        DeliveryStreamName=delivery_stream_name,
        ExtendedS3DestinationConfiguration={
            "RoleARN": _get_role_name(region_name),
            "BucketARN": f"arn:aws:s3::{bucket_name}",
        },
    )["DeliveryStreamARN"]

    # when
    client_logs.put_subscription_filter(
        logGroupName=log_group_name,
        filterName="firehose-test",
        filterPattern="",
        destinationArn=firehose_arn,
    )

    # then
    response = client_logs.describe_subscription_filters(
        logGroupName=log_group_name)
    response["subscriptionFilters"].should.have.length_of(1)
    _filter = response["subscriptionFilters"][0]
    _filter["creationTime"].should.be.a(int)
    _filter["destinationArn"] = firehose_arn
    _filter["distribution"] = "ByLogStream"
    _filter["logGroupName"] = "/firehose-test"
    _filter["filterName"] = "firehose-test"
    _filter["filterPattern"] = ""

    # when
    ts_0 = int(unix_time_millis(datetime.utcnow()))
    ts_1 = int(unix_time_millis(datetime.utcnow()))
    client_logs.put_log_events(
        logGroupName=log_group_name,
        logStreamName=log_stream_name,
        logEvents=[
            {
                "timestamp": ts_0,
                "message": "test"
            },
            {
                "timestamp": ts_1,
                "message": "test 2"
            },
        ],
    )

    # then
    bucket_objects = s3_client.list_objects_v2(Bucket=bucket_name)
    message = s3_client.get_object(Bucket=bucket_name,
                                   Key=bucket_objects["Contents"][0]["Key"])
    response = json.loads(
        zlib.decompress(message["Body"].read(),
                        16 + zlib.MAX_WBITS).decode("utf-8"))

    response["messageType"].should.equal("DATA_MESSAGE")
    response["owner"].should.equal("123456789012")
    response["logGroup"].should.equal("/firehose-test")
    response["logStream"].should.equal("delivery-stream")
    response["subscriptionFilters"].should.equal(["firehose-test"])
    log_events = sorted(response["logEvents"],
                        key=lambda log_event: log_event["id"])
    log_events.should.have.length_of(2)
    log_events[0]["id"].should.be.a(int)
    log_events[0]["message"].should.equal("test")
    log_events[0]["timestamp"].should.equal(ts_0)
    log_events[1]["id"].should.be.a(int)
    log_events[1]["message"].should.equal("test 2")
    log_events[1]["timestamp"].should.equal(ts_1)
Example 8
    def mark_sent(self, delay_seconds=None):
        self.sent_timestamp = unix_time_millis()
        if delay_seconds:
            self.delay(delay_seconds=delay_seconds)
Example 9
    def _invoke_lambda(self, code, event=None, context=None):
        # TODO: context not yet implemented
        if event is None:
            event = dict()
        if context is None:
            context = {}
        output = None

        try:
            # TODO: I believe we can keep the container running and feed events as needed
            #       also need to hook it up to the other services so it can make kms/s3 etc. calls
            #  Should get invoke_id /RequestId from invocation
            env_vars = {
                "_HANDLER": self.handler,
                "AWS_EXECUTION_ENV": "AWS_Lambda_{}".format(self.run_time),
                "AWS_LAMBDA_FUNCTION_TIMEOUT": self.timeout,
                "AWS_LAMBDA_FUNCTION_NAME": self.function_name,
                "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": self.memory_size,
                "AWS_LAMBDA_FUNCTION_VERSION": self.version,
                "AWS_REGION": self.region,
                "AWS_ACCESS_KEY_ID": "role-account-id",
                "AWS_SECRET_ACCESS_KEY": "role-secret-key",
                "AWS_SESSION_TOKEN": "session-token",
            }

            env_vars.update(self.environment_vars)

            container = exit_code = None
            log_config = docker.types.LogConfig(
                type=docker.types.LogConfig.types.JSON)
            with _DockerDataVolumeContext(self) as data_vol:
                try:
                    self.docker_client.ping()  # Verify Docker is running
                    run_kwargs = (dict(links={"motoserver": "motoserver"})
                                  if settings.TEST_SERVER_MODE else {})
                    container = self.docker_client.containers.run(
                        "lambci/lambda:{}".format(self.run_time),
                        [self.handler, json.dumps(event)],
                        remove=False,
                        mem_limit="{}m".format(self.memory_size),
                        volumes=["{}:/var/task".format(data_vol.name)],
                        environment=env_vars,
                        detach=True,
                        log_config=log_config,
                        **run_kwargs)
                finally:
                    if container:
                        try:
                            exit_code = container.wait(timeout=300)
                        except requests.exceptions.ReadTimeout:
                            exit_code = -1
                            container.stop()
                            container.kill()
                        else:
                            if docker_3:
                                exit_code = exit_code["StatusCode"]

                        output = container.logs(stdout=False, stderr=True)
                        output += container.logs(stdout=True, stderr=False)
                        container.remove()

            output = output.decode("utf-8")

            # Send output to "logs" backend
            invoke_id = uuid.uuid4().hex
            log_stream_name = "{date.year}/{date.month:02d}/{date.day:02d}/[{version}]{invoke_id}".format(
                date=datetime.datetime.utcnow(),
                version=self.version,
                invoke_id=invoke_id,
            )

            self.logs_backend.create_log_stream(self.logs_group_name,
                                                log_stream_name)

            log_events = [{
                "timestamp": unix_time_millis(),
                "message": line
            } for line in output.splitlines()]
            self.logs_backend.put_log_events(self.logs_group_name,
                                             log_stream_name, log_events, None)

            if exit_code != 0:
                raise Exception(
                    "lambda invoke failed output: {}".format(output))

            # We only care about the response from the lambda
            # Which is the last line of the output, according to https://github.com/lambci/docker-lambda/issues/25
            resp = output.splitlines()[-1]
            logs = os.linesep.join(
                [line for line in self.convert(output).splitlines()[:-1]])
            return resp, False, logs
        except docker.errors.DockerException as e:
            # Docker itself is probably not running - there will be no Lambda-logs to handle
            return "error running docker: {}".format(e), True, ""
        except BaseException as e:
            traceback.print_exc()
            logs = os.linesep.join(
                [line for line in self.convert(output).splitlines()[:-1]])
            return "error running lambda: {}".format(e), True, logs
Example 10
def test_put_subscription_filter_with_lambda():
    # given
    region_name = "us-east-1"
    client_lambda = boto3.client("lambda", region_name)
    client_logs = boto3.client("logs", region_name)
    log_group_name = "/test"
    log_stream_name = "stream"
    client_logs.create_log_group(logGroupName=log_group_name)
    client_logs.create_log_stream(logGroupName=log_group_name,
                                  logStreamName=log_stream_name)
    function_arn = client_lambda.create_function(
        FunctionName="test",
        Runtime="python3.8",
        Role=_get_role_name(region_name),
        Handler="lambda_function.lambda_handler",
        Code={"ZipFile": _get_test_zip_file()},
        Description="test lambda function",
        Timeout=3,
        MemorySize=128,
        Publish=True,
    )["FunctionArn"]

    # when
    client_logs.put_subscription_filter(
        logGroupName=log_group_name,
        filterName="test",
        filterPattern="",
        destinationArn=function_arn,
    )

    # then
    response = client_logs.describe_subscription_filters(
        logGroupName=log_group_name)
    response["subscriptionFilters"].should.have.length_of(1)
    sub_filter = response["subscriptionFilters"][0]
    sub_filter["creationTime"].should.be.a(int)
    sub_filter["destinationArn"].should.equal(
        "arn:aws:lambda:us-east-1:123456789012:function:test")
    sub_filter["distribution"].should.equal("ByLogStream")
    sub_filter["logGroupName"].should.equal("/test")
    sub_filter["filterName"].should.equal("test")
    sub_filter["filterPattern"].should.equal("")

    # when
    ts_0 = int(unix_time_millis(datetime.utcnow()))
    ts_1 = int(unix_time_millis(datetime.utcnow())) + 10
    client_logs.put_log_events(
        logGroupName=log_group_name,
        logStreamName=log_stream_name,
        logEvents=[
            {
                "timestamp": ts_0,
                "message": "test"
            },
            {
                "timestamp": ts_1,
                "message": "test 2"
            },
        ],
    )

    # then
    msg_showed_up, received_message = _wait_for_log_msg(
        client_logs, "/aws/lambda/test", "awslogs")
    assert msg_showed_up, "CloudWatch log event was not found. All logs: {}".format(
        received_message)

    data = json.loads(received_message)["awslogs"]["data"]
    response = json.loads(
        zlib.decompress(base64.b64decode(data),
                        16 + zlib.MAX_WBITS).decode("utf-8"))
    response["messageType"].should.equal("DATA_MESSAGE")
    response["owner"].should.equal("123456789012")
    response["logGroup"].should.equal("/test")
    response["logStream"].should.equal("stream")
    response["subscriptionFilters"].should.equal(["test"])
    log_events = sorted(response["logEvents"],
                        key=lambda log_event: log_event["id"])
    log_events.should.have.length_of(2)
    log_events[0]["id"].should.be.a(int)
    log_events[0]["message"].should.equal("test")
    log_events[0]["timestamp"].should.equal(ts_0)
    log_events[1]["id"].should.be.a(int)
    log_events[1]["message"].should.equal("test 2")
    log_events[1]["timestamp"].should.equal(ts_1)
Example 11
    def _invoke_lambda(self, code, event=None, context=None):
        # TODO: context not yet implemented
        if event is None:
            event = dict()
        if context is None:
            context = {}

        try:
            # TODO: I believe we can keep the container running and feed events as needed
            #       also need to hook it up to the other services so it can make kms/s3 etc. calls
            #  Should get invoke_id /RequestId from invocation
            env_vars = {
                "AWS_LAMBDA_FUNCTION_TIMEOUT": self.timeout,
                "AWS_LAMBDA_FUNCTION_NAME": self.function_name,
                "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": self.memory_size,
                "AWS_LAMBDA_FUNCTION_VERSION": self.version,
                "AWS_REGION": self.region,
            }

            env_vars.update(self.environment_vars)

            container = output = exit_code = None
            with _DockerDataVolumeContext(self) as data_vol:
                try:
                    run_kwargs = dict(links={'motoserver': 'motoserver'}
                                      ) if settings.TEST_SERVER_MODE else {}
                    container = self.docker_client.containers.run(
                        "lambci/lambda:{}".format(self.run_time),
                        [self.handler, json.dumps(event)],
                        remove=False,
                        mem_limit="{}m".format(self.memory_size),
                        volumes=["{}:/var/task".format(data_vol.name)],
                        environment=env_vars,
                        detach=True,
                        **run_kwargs)
                finally:
                    if container:
                        try:
                            exit_code = container.wait(timeout=300)
                        except requests.exceptions.ReadTimeout:
                            exit_code = -1
                            container.stop()
                            container.kill()
                        else:
                            if docker_3:
                                exit_code = exit_code['StatusCode']

                        output = container.logs(stdout=False, stderr=True)
                        output += container.logs(stdout=True, stderr=False)
                        container.remove()

            output = output.decode('utf-8')

            # Send output to "logs" backend
            invoke_id = uuid.uuid4().hex
            log_stream_name = "{date.year}/{date.month:02d}/{date.day:02d}/[{version}]{invoke_id}".format(
                date=datetime.datetime.utcnow(),
                version=self.version,
                invoke_id=invoke_id)

            self.logs_backend.create_log_stream(self.logs_group_name,
                                                log_stream_name)

            log_events = [{
                'timestamp': unix_time_millis(),
                "message": line
            } for line in output.splitlines()]
            self.logs_backend.put_log_events(self.logs_group_name,
                                             log_stream_name, log_events, None)

            if exit_code != 0:
                raise Exception(
                    'lambda invoke failed output: {}'.format(output))

            # strip out RequestId lines
            output = os.linesep.join([
                line for line in self.convert(output).splitlines()
                if not _stderr_regex.match(line)
            ])
            return output, False
        except BaseException as e:
            traceback.print_exc()
            return "error running lambda: {}".format(e), True
Example 12
    def _invoke_lambda(self, code, event=None, context=None):
        # TODO: context not yet implemented
        if event is None:
            event = dict()
        if context is None:
            context = {}

        try:
            # TODO: I believe we can keep the container running and feed events as needed
            #       also need to hook it up to the other services so it can make kms/s3 etc. calls
            #  Should get invoke_id /RequestId from invocation
            env_vars = {
                "AWS_LAMBDA_FUNCTION_TIMEOUT": self.timeout,
                "AWS_LAMBDA_FUNCTION_NAME": self.function_name,
                "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": self.memory_size,
                "AWS_LAMBDA_FUNCTION_VERSION": self.version,
                "AWS_REGION": self.region,
            }

            env_vars.update(self.environment_vars)

            container = output = exit_code = None
            with _DockerDataVolumeContext(self) as data_vol:
                try:
                    run_kwargs = dict(links={'motoserver': 'motoserver'}) if settings.TEST_SERVER_MODE else {}
                    container = self.docker_client.containers.run(
                        "lambci/lambda:{}".format(self.run_time),
                        [self.handler, json.dumps(event)], remove=False,
                        mem_limit="{}m".format(self.memory_size),
                        volumes=["{}:/var/task".format(data_vol.name)], environment=env_vars, detach=True, **run_kwargs)
                finally:
                    if container:
                        try:
                            exit_code = container.wait(timeout=300)
                        except requests.exceptions.ReadTimeout:
                            exit_code = -1
                            container.stop()
                            container.kill()
                        else:
                            if docker_3:
                                exit_code = exit_code['StatusCode']

                        output = container.logs(stdout=False, stderr=True)
                        output += container.logs(stdout=True, stderr=False)
                        container.remove()

            output = output.decode('utf-8')

            # Send output to "logs" backend
            invoke_id = uuid.uuid4().hex
            log_stream_name = "{date.year}/{date.month:02d}/{date.day:02d}/[{version}]{invoke_id}".format(
                date=datetime.datetime.utcnow(), version=self.version, invoke_id=invoke_id
            )

            self.logs_backend.create_log_stream(self.logs_group_name, log_stream_name)

            log_events = [{'timestamp': unix_time_millis(), "message": line}
                          for line in output.splitlines()]
            self.logs_backend.put_log_events(self.logs_group_name, log_stream_name, log_events, None)

            if exit_code != 0:
                raise Exception(
                    'lambda invoke failed output: {}'.format(output))

            # strip out RequestId lines
            output = os.linesep.join([line for line in self.convert(output).splitlines() if not _stderr_regex.match(line)])
            return output, False
        except BaseException as e:
            traceback.print_exc()
            return "error running lambda: {}".format(e), True
Example 13
    def visible(self):
        current_time = unix_time_millis()
        if current_time > self.visible_at:
            return True
        return False
Example 14
def moto_put_log_events(self, log_group_name, log_stream_name, log_events):
    # TODO: call/patch upstream method here, instead of duplicating the code!
    self.last_ingestion_time = int(unix_time_millis())
    self.stored_bytes += sum(
        [len(log_event["message"]) for log_event in log_events])
    events = [
        logs_models.LogEvent(self.last_ingestion_time, log_event)
        for log_event in log_events
    ]
    self.events += events
    self.upload_sequence_token += 1

    # apply the filter pattern -> only forward events that match it
    if self.filter_pattern:
        # TODO only patched in pro
        matches = get_pattern_matcher(self.filter_pattern)
        events = [
            logs_models.LogEvent(self.last_ingestion_time, event)
            for event in log_events if matches(self.filter_pattern, event)
        ]

    if events and self.destination_arn:
        log_events = [{
            "id": str(event.event_id),
            "timestamp": event.timestamp,
            "message": event.message,
        } for event in events]

        data = {
            "messageType": "DATA_MESSAGE",
            "owner": aws_stack.get_account_id(),
            "logGroup": log_group_name,
            "logStream": log_stream_name,
            "subscriptionFilters": [self.filter_name],
            "logEvents": log_events,
        }

        output = io.BytesIO()
        with GzipFile(fileobj=output, mode="w") as f:
            f.write(json.dumps(data, separators=(",", ":")).encode("utf-8"))
        payload_gz_encoded = output.getvalue()
        event = {
            "awslogs": {
                "data": base64.b64encode(output.getvalue()).decode("utf-8")
            }
        }

        if ":lambda:" in self.destination_arn:
            client = aws_stack.connect_to_service("lambda")
            lambda_name = aws_stack.lambda_function_name(self.destination_arn)
            client.invoke(FunctionName=lambda_name, Payload=json.dumps(event))
        if ":kinesis:" in self.destination_arn:
            client = aws_stack.connect_to_service("kinesis")
            stream_name = aws_stack.kinesis_stream_name(self.destination_arn)
            client.put_record(
                StreamName=stream_name,
                Data=payload_gz_encoded,
                PartitionKey=log_group_name,
            )
        if ":firehose:" in self.destination_arn:
            client = aws_stack.connect_to_service("firehose")
            firehose_name = aws_stack.firehose_name(self.destination_arn)
            client.put_record(
                DeliveryStreamName=firehose_name,
                Record={"Data": payload_gz_encoded},
            )
    return "{:056d}".format(self.upload_sequence_token)
Example 15
    def change_visibility(self, visibility_timeout):
        # We're dealing with milliseconds internally
        visibility_timeout_msec = int(visibility_timeout) * 1000
        self.visible_at = unix_time_millis() + visibility_timeout_msec
Example 16
    def mark_sent(self, delay_seconds=None):
        self.sent_timestamp = int(unix_time_millis())
        if delay_seconds:
            self.delay(delay_seconds=delay_seconds)
Example 17
    def __init__(self, policy_name, policy_document):
        self.policy_name = policy_name
        self.policy_document = policy_document
        self.last_updated_time = int(unix_time_millis())
Example 18
    def update(self, policy_document):
        self.policy_document = policy_document
        self.last_updated_time = int(unix_time_millis())
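A minimal usage sketch tying together the __init__/update pair from Examples 17 and 18; the Policy class name and the unix_time_millis stub are assumptions made for the sake of a runnable example:

    import time

    def unix_time_millis():
        return time.time() * 1000

    class Policy:
        def __init__(self, policy_name, policy_document):
            self.policy_name = policy_name
            self.policy_document = policy_document
            self.last_updated_time = int(unix_time_millis())

        def update(self, policy_document):
            self.policy_document = policy_document
            self.last_updated_time = int(unix_time_millis())

    p = Policy("my-policy", '{"Version": "2012-10-17"}')
    first = p.last_updated_time
    p.update('{"Version": "2012-10-17", "Statement": []}')
    assert p.last_updated_time >= first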
Example 19
    def delayed(self):
        current_time = unix_time_millis()
        if current_time < self.delayed_until:
            return True
        return False
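Finally, delay (Example 6) and delayed pair up the same way change_visibility and visible do. A compact stub exercising both, with time.time() standing in for unix_time_millis():

    import time

    class DelayedMsg:
        def __init__(self):
            self.delayed_until = 0.0

        def delay(self, delay_seconds):
            self.delayed_until = time.time() * 1000 + int(delay_seconds) * 1000

        def delayed(self):
            return time.time() * 1000 < self.delayed_until

    m = DelayedMsg()
    m.delay(delay_seconds=5)
    assert m.delayed()  # still inside the 5-second delay window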