Example #1
def describe_cluster(cluster_name, region=None):
    """
    Get detailed information about an existing cluster.

    :param cluster_name: Name of the cluster
    :type cluster_name: str
    :param region: AWS Region that the operation corresponds to.
    :type region: str

    :rtype: DescribeClusterResponseContent
    """
    cluster = Cluster(cluster_name)
    validate_cluster(cluster)
    cfn_stack = cluster.stack

    fleet_status = cluster.compute_fleet_status

    config_url = "NOT_AVAILABLE"
    try:
        config_url = cluster.config_presigned_url
    except ClusterActionError as e:
        # Do not fail request when S3 bucket is not available
        LOGGER.error(e)

    response = DescribeClusterResponseContent(
        creation_time=to_utc_datetime(cfn_stack.creation_time),
        version=cfn_stack.version,
        cluster_configuration=ClusterConfigurationStructure(url=config_url),
        tags=[Tag(value=tag.get("Value"), key=tag.get("Key")) for tag in cfn_stack.tags],
        cloud_formation_stack_status=cfn_stack.status,
        cluster_name=cluster_name,
        compute_fleet_status=fleet_status.value,
        cloudformation_stack_arn=cfn_stack.id,
        last_updated_time=to_utc_datetime(cfn_stack.last_updated_time),
        region=os.environ.get("AWS_DEFAULT_REGION"),
        cluster_status=cloud_formation_status_to_cluster_status(cfn_stack.status),
    )

    try:
        head_node = cluster.head_node_instance
        response.head_node = EC2Instance(
            instance_id=head_node.id,
            launch_time=to_utc_datetime(head_node.launch_time),
            public_ip_address=head_node.public_ip,
            instance_type=head_node.instance_type,
            state=InstanceState.from_dict(head_node.state),
            private_ip_address=head_node.private_ip,
        )
    except ClusterActionError as e:
        # This should not be treated as a failure because the head node might not be running in some cases
        LOGGER.info(e)

    return response
Example #2
def validate_timestamp(date_str: str, ts_name: str = "Time"):
    try:
        return to_utc_datetime(date_str)
    except Exception:
        raise BadRequestException(
            f"{ts_name} filter must be in the ISO 8601 format: YYYY-MM-DDThh:mm:ssZ. "
            "(e.g. 1984-09-15T19:20:30Z or 1984-09-15).")
def describe_cluster_instances_mock_response(instances):
    result = []
    for instance in instances:
        node_type = instance.get("node_type") or "HeadNode"
        response = {
            "instanceId": "i-0a9342a0000000000",
            "instanceType": "t2.micro",
            "launchTime": to_iso_timestr(to_utc_datetime("2021-04-30T00:00:00+00:00")),
            "nodeType": node_type,
            "privateIpAddress": "10.0.0.79",
            "publicIpAddress": "1.2.3.4",
            "state": "running",
        }
        if node_type == "Compute":
            response["nodeType"] = "ComputeNode"
            response["queueName"] = instance.get("queue_name")
        result.append(response)
    return {"instances": result}
 def convert_log(log):
     log["logStreamArn"] = log.pop("arn")
     if "storedBytes" in log:
         del log["storedBytes"]
     for ts_name in ["creationTime", "firstEventTimestamp", "lastEventTimestamp", "lastIngestionTime"]:
         log[ts_name] = to_iso_timestr(to_utc_datetime(log[ts_name]))
     return LogStream.from_dict(log)
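`convert_log` above reshapes one entry of a raw CloudWatch Logs `describe_log_streams` response for the API model: `arn` is renamed to `logStreamArn`, `storedBytes` is dropped, and the epoch-millisecond timestamps become ISO 8601 UTC strings. The following self-contained sketch performs the same reshaping with the standard library only, stopping before `LogStream.from_dict`; the sample values are illustrative and mirror the ones used later in these examples.

from datetime import datetime, timezone


def reshape_log_stream(raw: dict) -> dict:
    """Rename arn, drop storedBytes, and render epoch-millisecond fields as ISO 8601 UTC strings."""
    log = dict(raw)
    log["logStreamArn"] = log.pop("arn")
    log.pop("storedBytes", None)
    for ts_name in ("creationTime", "firstEventTimestamp", "lastEventTimestamp", "lastIngestionTime"):
        dt = datetime.fromtimestamp(log[ts_name] / 1000, tz=timezone.utc)
        log[ts_name] = dt.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"
    return log


raw_entry = {
    "logStreamName": "ip-10-0-0-102.i-0123456789abcdef0.cfn-init",
    "arn": "arn:aws:logs:eu-west-1:111:log-group:/aws/parallelcluster/demo:log-stream:cfn-init",
    "creationTime": 1622802842228,
    "firstEventTimestamp": 1622802790248,
    "lastEventTimestamp": 1622802893126,
    "lastIngestionTime": 1622802903119,
    "storedBytes": 0,
}
print(reshape_log_stream(raw_entry)["firstEventTimestamp"])  # 2021-06-04T10:33:10.248Z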
 def test_successful_request(self, mocker, client, scheduler, status,
                             last_status_updated_time):
     mocker.patch("pcluster.aws.cfn.CfnClient.describe_stack",
                  return_value=cfn_describe_stack_mock_response(scheduler))
     if scheduler == "slurm":
         if status == "UNKNOWN":
             dynamodb_item = {}  # Test dynamodb item not exist
         else:
             dynamodb_item = {"Item": {"Status": status}}
             if last_status_updated_time:
                 last_status_updated_time = str(last_status_updated_time)
                 dynamodb_item["Item"][
                     "LastUpdatedTime"] = last_status_updated_time
         mocker.patch("pcluster.aws.dynamo.DynamoResource.get_item",
                      return_value=dynamodb_item)
     elif scheduler == "awsbatch":
         mocker.patch(
             "pcluster.aws.batch.BatchClient.get_compute_environment_state",
             return_value=status)
     response = self._send_test_request(client)
     with soft_assertions():
         assert_that(response.status_code).is_equal_to(200)
         expected_response = {"status": status}
         if last_status_updated_time:
             expected_response["lastStatusUpdatedTime"] = to_iso_timestr(
                 to_utc_datetime(last_status_updated_time))
         assert_that(response.get_json()).is_equal_to(expected_response)
Example #6
 def __call__(self, value):
     """Check if the given value is in the ISO8601 format."""
     try:
         return to_utc_datetime(value)
     except Exception as e:
         raise argparse.ArgumentTypeError(
             "Start time and end time filters must be in the ISO 8601 UTC format: YYYY-MM-DDThh:mm:ssZ "
             f"(e.g. 1984-09-15T19:20:30Z or 1984-09-15). {e}"
         )
Example #7
 def __init__(self, instance: ClusterInstance):
     self.launch_time = to_utc_datetime(instance.launch_time)
     self.instance_id = instance.id
     self.public_ip_address = instance.public_ip
     self.private_ip_address = instance.private_ip
     self.instance_type = instance.instance_type
     self.os = instance.os
     self.user = instance.default_user
     self.state = instance.state
     self.node_type = instance.node_type
Example #8
 def start_time(self):
     """Get start time filter."""
     if not self._start_time:
         try:
             creation_time = AWSApi.instance().logs.describe_log_group(
                 self._log_group_name).get("creationTime")
             self._start_time = to_utc_datetime(creation_time)
         except AWSClientError as e:
             raise FiltersParserError(
                 f"Unable to retrieve creation time of log group {self._log_group_name}, {str(e)}"
             )
     return self._start_time
    def test_execute(self, mocker, set_env, args):
        export_logs_mock = mocker.patch(
            "pcluster.cli.commands.cluster_logs.Cluster.export_logs",
            return_value=args.get("output_file", "https://u.r.l."),
        )
        set_env("AWS_DEFAULT_REGION", "us-east-1")

        command = ["export-cluster-logs"] + self._build_cli_args({
            **REQUIRED_ARGS,
            **args
        })
        out = run(command)
        if args.get("output_file") is not None:
            expected = {"path": os.path.realpath(args.get("output_file"))}
        else:
            expected = {"url": "https://u.r.l."}
        assert_that(out).is_equal_to(expected)
        assert_that(export_logs_mock.call_args).is_length(2)

        # verify arguments
        expected_params = {
            "bucket": "bucketname",
            "bucket_prefix": None,
            "keep_s3_objects": False,
            "filters": None,
            "start_time": None,
            "end_time": None,
        }
        expected_params.update(args)
        expected_params.update({
            "output_file": args.get("output_file") and os.path.realpath(args.get("output_file")),
            "start_time": args.get("start_time") and to_utc_datetime(args["start_time"]),
            "end_time": args.get("end_time") and to_utc_datetime(args["end_time"]),
        })
        export_logs_mock.assert_called_with(**expected_params)
Example #10
    def __init__(self, imagebuilder: ImageBuilder):
        super().__init__(imagebuilder=imagebuilder)
        self.stack_exist = True
        self.stack_name = imagebuilder.stack.name
        self.stack_status = imagebuilder.stack.status
        self.stack_arn = imagebuilder.stack.id
        self.tags = imagebuilder.stack.tags
        self.version = imagebuilder.stack.version
        self.creation_time = to_utc_datetime(imagebuilder.stack.creation_time)
        self.build_log = imagebuilder.stack.build_log

        # Build image process status, derived from the stack status mapping
        self.imagebuild_status = imagebuilder.imagebuild_status
Example #11
 def __init__(self, imagebuilder: ImageBuilder):
     super().__init__(imagebuilder=imagebuilder)
     self.image_exist = True
     self.image_name = imagebuilder.image.name
     self.image_id = imagebuilder.image.pcluster_image_id
     self.ec2_image_id = imagebuilder.image.id
     self.image_state = imagebuilder.image.state
     self.image_architecture = imagebuilder.image.architecture
     self.image_tags = imagebuilder.image.tags
     self.imagebuild_status = "BUILD_COMPLETE"
     self.creation_time = to_utc_datetime(imagebuilder.image.creation_date)
     self.build_log = imagebuilder.image.build_log
     self.version = imagebuilder.image.version
def _stack_to_describe_image_response(imagebuilder):
    imagebuilder_image_state = imagebuilder.stack.image_state or {}
    return DescribeImageResponseContent(
        image_configuration=ImageConfigurationStructure(url=_presigned_config_url(imagebuilder)),
        image_id=imagebuilder.image_id,
        image_build_status=imagebuilder.imagebuild_status,
        image_build_logs_arn=imagebuilder.stack.build_log,
        imagebuilder_image_status=imagebuilder_image_state.get("status", None),
        imagebuilder_image_status_reason=imagebuilder_image_state.get("reason", None),
        cloudformation_stack_status=imagebuilder.stack.status,
        cloudformation_stack_status_reason=imagebuilder.stack.status_reason,
        cloudformation_stack_arn=imagebuilder.stack.id,
        cloudformation_stack_creation_time=to_utc_datetime(imagebuilder.stack.creation_time),
        cloudformation_stack_tags=[Tag(key=tag["Key"], value=tag["Value"]) for tag in imagebuilder.stack.tags],
        region=os_lib.environ.get("AWS_DEFAULT_REGION"),
        version=imagebuilder.stack.version,
    )
def _image_to_describe_image_response(imagebuilder):
    return DescribeImageResponseContent(
        creation_time=to_utc_datetime(imagebuilder.image.creation_date),
        image_configuration=ImageConfigurationStructure(url=_presigned_config_url(imagebuilder)),
        image_id=imagebuilder.image_id,
        image_build_status=ImageBuildStatus.BUILD_COMPLETE,
        ec2_ami_info=Ec2AmiInfo(
            ami_name=imagebuilder.image.name,
            ami_id=imagebuilder.image.id,
            state=imagebuilder.image.state.upper(),
            tags=[Tag(key=tag["Key"], value=tag["Value"]) for tag in imagebuilder.image.tags],
            architecture=imagebuilder.image.architecture,
            description=imagebuilder.image.description,
        ),
        region=os_lib.environ.get("AWS_DEFAULT_REGION"),
        version=imagebuilder.image.version,
    )
def describe_compute_fleet(cluster_name, region=None):
    """
    Describe the status of the compute fleet.

    :param cluster_name: Name of the cluster
    :type cluster_name: str
    :param region: AWS Region that the operation corresponds to.
    :type region: str

    :rtype: DescribeComputeFleetResponseContent
    """
    cluster = Cluster(cluster_name)
    validate_cluster(cluster)
    status, last_status_updated_time = cluster.compute_fleet_status_with_last_updated_time
    return DescribeComputeFleetResponseContent(
        last_status_updated_time=last_status_updated_time and to_utc_datetime(last_status_updated_time),
        status=status.value,
    )
def update_compute_fleet(update_compute_fleet_request_content, cluster_name, region=None):
    """
        Update the status of the cluster compute fleet.

    request_content:
        :type update_compute_fleet_request_content: dict | bytes
        :param cluster_name: Name of the cluster
        :type cluster_name: str
        :param region: AWS Region that the operation corresponds to.
        :type region: str

        :rtype: UpdateComputeFleetResponseContent
    """
    update_compute_fleet_request_content = UpdateComputeFleetRequestContent.from_dict(
        update_compute_fleet_request_content
    )
    cluster = Cluster(cluster_name)
    validate_cluster(cluster)

    status = update_compute_fleet_request_content.status
    if cluster.stack.scheduler == "slurm":
        if status == RequestedComputeFleetStatus.START_REQUESTED:
            cluster.start()
        elif status == RequestedComputeFleetStatus.STOP_REQUESTED:
            cluster.stop()
        else:
            raise BadRequestException(
                "the update compute fleet status can only be set to"
                " `START_REQUESTED` or `STOP_REQUESTED` for Slurm clusters."
            )
    elif cluster.stack.scheduler == "awsbatch":
        if status == RequestedComputeFleetStatus.ENABLED:
            cluster.start()
        elif status == RequestedComputeFleetStatus.DISABLED:
            cluster.stop()
        else:
            raise BadRequestException(
                "the update compute fleet status can only be set to"
                " `ENABLED` or `DISABLED` for AWS Batch clusters."
            )
    status, last_status_updated_time = cluster.compute_fleet_status_with_last_updated_time
    last_status_updated_time = last_status_updated_time and to_utc_datetime(last_status_updated_time)
    return UpdateComputeFleetResponseContent(last_status_updated_time=last_status_updated_time, status=status.value)
Example #16
def describe_cluster_instances(cluster_name,
                               region=None,
                               next_token=None,
                               node_type=None,
                               queue_name=None):
    """
    Describe the instances belonging to a given cluster.

    :param cluster_name: Name of the cluster
    :type cluster_name: str
    :param region: AWS Region that the operation corresponds to.
    :type region: str
    :param next_token: Token to use for paginated requests.
    :type next_token: str
    :param node_type: Filter the instances by node type.
    :type node_type: dict | bytes
    :param queue_name: Filter the instances by queue name.
    :type queue_name: str

    :rtype: DescribeClusterInstancesResponseContent
    """
    cluster = Cluster(cluster_name)
    node_type = api_node_type_to_cluster_node_type(node_type)
    instances, next_token = cluster.describe_instances(next_token=next_token,
                                                       node_type=node_type,
                                                       queue_name=queue_name)
    ec2_instances = []
    for instance in instances:
        ec2_instances.append(
            ClusterInstance(
                instance_id=instance.id,
                launch_time=to_utc_datetime(instance.launch_time),
                public_ip_address=instance.public_ip,
                instance_type=instance.instance_type,
                state=instance.state,
                private_ip_address=instance.private_ip,
                node_type=ApiNodeType.HEAD
                if instance.node_type == NodeType.HEAD_NODE.value
                else ApiNodeType.COMPUTE,
                queue_name=instance.queue_name,
            ))
    return DescribeClusterInstancesResponseContent(instances=ec2_instances,
                                                   next_token=next_token)
 def test_successful_request(self, mocker, client, scheduler, status,
                             last_status_updated_time):
     mocker.patch("pcluster.aws.cfn.CfnClient.describe_stack",
                  return_value=cfn_describe_stack_mock_response(scheduler))
     config_mock = mocker.patch("pcluster.models.cluster.Cluster.config")
     config_mock.scheduling.scheduler = scheduler
     if scheduler == "slurm":
         if status == "UNKNOWN":
             dynamodb_item = {}  # Test dynamodb item not exist
         else:
             dynamodb_item = {"Item": {"Status": status}}
             if last_status_updated_time:
                 last_status_updated_time = str(last_status_updated_time)
                 dynamodb_item["Item"][
                     "LastUpdatedTime"] = last_status_updated_time
         # mock the method to check the status before update
         mocker.patch("pcluster.aws.dynamo.DynamoResource.get_item",
                      return_value=dynamodb_item)
         # mock the method to update the item in dynamodb
         mocker.patch("pcluster.aws.dynamo.DynamoResource.put_item")
     elif scheduler == "awsbatch":
         mocker.patch(
             "pcluster.aws.batch.BatchClient.get_compute_environment_state",
             return_value=status)
         if status == "ENABLED":
             mocker.patch(
                 "pcluster.aws.batch.BatchClient.enable_compute_environment"
             )
         elif status == "DISABLED":
             mocker.patch(
                 "pcluster.aws.batch.BatchClient.disable_compute_environment"
             )
     response = self._send_test_request(client,
                                        request_body={"status": status})
     with soft_assertions():
         assert_that(response.status_code).is_equal_to(200)
         expected_response = {"status": status}
         if last_status_updated_time:
             expected_response["lastStatusUpdatedTime"] = to_iso_timestr(
                 to_utc_datetime(last_status_updated_time))
         assert_that(response.get_json()).is_equal_to(expected_response)
    def test_describe_image_in_failed_state_with_reasons_and_associated_imagebuilder_image(
            self, client, mocker):
        mocker.patch(
            "pcluster.aws.ec2.Ec2Client.describe_image_by_id_tag",
            side_effect=ImageNotFoundError("describe_image_by_id_tag"),
        )
        mocker.patch(
            "pcluster.aws.cfn.CfnClient.describe_stack",
            return_value=_create_stack("image1",
                                       CloudFormationStackStatus.CREATE_FAILED,
                                       "cfn test reason"),
        )
        mocker.patch("pcluster.aws.cfn.CfnClient.describe_stack_resource",
                     return_value=None)
        mocker.patch(
            "pcluster.aws.cfn.CfnClient.describe_stack_resource",
            return_value={
                "StackResourceDetail": {
                    "PhysicalResourceId": "test_id"
                }
            },
        )
        mocker.patch(
            "pcluster.aws.imagebuilder.ImageBuilderClient.get_image_state",
            return_value={
                "status": ImageBuilderImageStatus.FAILED,
                "reason": "img test reason"
            },
        )
        mocker.patch(
            "pcluster.api.controllers.image_operations_controller._presigned_config_url",
            return_value="https://parallelcluster.aws.com/bucket/key",
        )

        expected_response = {
            "cloudformationStackArn": "arn:image1",
            "imageBuildLogsArn": "arn:image1:build_log",
            "cloudformationStackCreationTime": to_iso_timestr(to_utc_datetime("2021-04-12 00:00:00")),
            "cloudformationStackTags": [
                {"key": "parallelcluster:image_id", "value": "image1"},
                {"key": "parallelcluster:version", "value": "3.0.0"},
                {"key": "parallelcluster:build_config", "value": "s3://bucket/key"},
                {"key": "parallelcluster:build_log", "value": "arn:image1:build_log"},
            ],
            "cloudformationStackStatus": CloudFormationStackStatus.CREATE_FAILED,
            "cloudformationStackStatusReason": "cfn test reason",
            "imageBuildStatus": ImageBuildStatus.BUILD_FAILED,
            "imageConfiguration": {"url": "https://parallelcluster.aws.com/bucket/key"},
            "imageId": "image1",
            "imagebuilderImageStatus": ImageBuilderImageStatus.FAILED,
            "imagebuilderImageStatusReason": "img test reason",
            "region": "us-east-1",
            "version": "3.0.0",
        }

        response = self._send_test_request(client, "image1")

        with soft_assertions():
            assert_that(response.status_code).is_equal_to(200)
            assert_that(response.get_json()).is_equal_to(expected_response)
Example #19
    def test_execute(self, mocker, set_env, test_datadir, args):
        mocked_result = [
            LogStream(
                FAKE_ID,
                "logstream",
                {
                    "events": [
                        {
                            "timestamp": 1622802790248,
                            "message": (
                                "2021-06-04 10:33:10,248 [DEBUG] CloudFormation client initialized "
                                "with endpoint https://cloudformation.eu-west-1.amazonaws.com"
                            ),
                            "ingestionTime": 1622802842382,
                        },
                        {
                            "timestamp": 1622802790248,
                            "message": (
                                "2021-06-04 10:33:10,248 [DEBUG] Describing resource HeadNodeLaunchTemplate in "
                                "stack test22"
                            ),
                            "ingestionTime": 1622802842382,
                        },
                        {
                            "timestamp": 1622802790390,
                            "message": (
                                "2021-06-04 10:33:10,390 [INFO] -----------------------Starting build"
                                "-----------------------"
                            ),
                            "ingestionTime": 1622802842382,
                        },
                    ],
                    "nextForwardToken": "f/3618",
                    "nextBackwardToken": "b/3619",
                    "ResponseMetadata": {},
                },
            )
        ] * 2 + [LogStream(FAKE_ID, "logstream", {})]
        get_image_log_events_mock = mocker.patch(
            "pcluster.api.controllers.image_logs_controller.ImageBuilder.get_log_events",
            side_effect=mocked_result)
        set_env("AWS_DEFAULT_REGION", "us-east-1")
        base_args = ["get-image-log-events"]
        command = base_args + self._build_cli_args({**REQUIRED_ARGS, **args})

        os.environ["TZ"] = "Europe/London"
        time.tzset()
        out = run(command)

        expected = json.loads(
            (test_datadir / "pcluster-out.txt").read_text().strip())
        assert_that(expected).is_equal_to(out)
        assert_that(get_image_log_events_mock.call_args).is_length(2)

        if args.get("limit", None):
            limit_val = get_image_log_events_mock.call_args[1].get("limit")
            assert_that(limit_val).is_type_of(int)

        # verify arguments
        kwargs = {
            "start_time": args.get("start_time", None) and to_utc_datetime(args["start_time"]),
            "end_time": args.get("end_time", None) and to_utc_datetime(args["end_time"]),
            "start_from_head": True if args.get("start_from_head", None) else None,
            "limit": int(args["limit"]) if args.get("limit", None) else None,
            "next_token": args.get("next_token", None),
        }
        get_image_log_events_mock.assert_called_with("log-stream-name",
                                                     **kwargs)
Example #20
    def test_successful_get_cluster_log_events_request(self, client, mocker,
                                                       mock_cluster_stack,
                                                       region, next_token,
                                                       start_from_head, limit,
                                                       start_time, end_time):
        cluster_name = "cluster"
        log_stream_name = "logstream"
        mock_log_events = [
            {
                "ingestionTime": 1627524017632,
                "message": "Jan 01 00:00:00 ip-10-0-0-1 systemd: Started Session c20325 of user root.",
                "timestamp": 1609459200000,
            },
            {
                "ingestionTime": 1627524017632,
                "message": "Jan 01 00:00:00 ip-10-0-0-1 systemd: Removed slice User Slice of root.",
                "timestamp": 1609459207000,
            },
        ]
        uid = "00000000-dddd-4444-bbbb-555555555555"
        mock_log_events_response = {
            "ResponseMetadata": {
                "HTTPHeaders": {
                    "content-length": "12345",
                    "content-type": "application/x-amz-json-1.1",
                    "date": "Fri, 01 Jan 2021 00:00:00 GMT",
                    "x-amzn-requestid": uid,
                },
                "HTTPStatusCode": 200,
                "RequestId": uid,
                "RetryAttempts": 0,
            },
            "events": mock_log_events,
            "nextBackwardToken": "b/123",
            "nextForwardToken": "f/456",
        }

        mock_log_stream = LogStream(cluster_name, log_stream_name,
                                    mock_log_events_response)

        get_log_events_mock = mocker.patch(
            "pcluster.models.cluster.Cluster.get_log_events",
            auto_spec=True,
            return_value=mock_log_stream,
        )

        mock_cluster_stack()

        response = self._send_test_request(client, cluster_name,
                                           log_stream_name, region, next_token,
                                           start_from_head, limit, start_time,
                                           end_time)

        expected_args = {
            "start_time": start_time and to_utc_datetime(start_time),
            "end_time": end_time and to_utc_datetime(end_time),
            "limit": limit,
            "start_from_head": start_from_head,
            "next_token": next_token,
        }
        get_log_events_mock.assert_called_with(log_stream_name,
                                               **expected_args)

        expected = {
            "events": [
                {
                    "message": "Jan 01 00:00:00 ip-10-0-0-1 systemd: Started Session c20325 of user root.",
                    "timestamp": "2021-01-01T00:00:00.000Z",
                },
                {
                    "message": "Jan 01 00:00:00 ip-10-0-0-1 systemd: Removed slice User Slice of root.",
                    "timestamp": "2021-01-01T00:00:07.000Z",
                },
            ],
            "nextToken": "f/456",
            "prevToken": "b/123",
        }
        assert_that(response.status_code).is_equal_to(200)
        assert_that(response.get_json()).is_equal_to(expected)
    def test_execute(self, mocker, set_env, args):
        logs = LogStreams()
        logs.log_streams = [
            {
                "logStreamName": "ip-10-0-0-102.i-0717e670ad2549e72.cfn-init",
                "creationTime": 1622802842228,
                "firstEventTimestamp": 1622802790248,
                "lastEventTimestamp": 1622802893126,
                "lastIngestionTime": 1622802903119,
                "uploadSequenceToken": "4961...",
                "arn": (
                    "arn:aws:logs:eu-west-1:111:log-group:/aws/parallelcluster/"
                    "test22-202106041223:log-stream:ip-10-0-0-102.i-0717e670ad2549e72.cfn-init"
                ),
                "storedBytes": 0,
            },
            {
                "logStreamName": "ip-10-0-0-102.i-0717e670ad2549e72.chef-client",
                "creationTime": 1622802842207,
                "firstEventTimestamp": 1622802837114,
                "lastEventTimestamp": 1622802861226,
                "lastIngestionTime": 1622802897558,
                "uploadSequenceToken": "4962...",
                "arn": (
                    "arn:aws:logs:eu-west-1:111:log-group:/aws/parallelcluster/"
                    "test22-202106041223:log-stream:ip-10-0-0-102.i-0717e670ad2549e72.chef-client"
                ),
                "storedBytes": 0,
            },
        ]

        logs.next_token = "123-456"

        list_log_streams_mock = mocker.patch(
            "pcluster.api.controllers.image_logs_controller.ImageBuilder.list_log_streams",
            return_value=logs)
        set_env("AWS_DEFAULT_REGION", "us-east-1")

        base_args = ["list-image-log-streams"]
        command = base_args + self._build_cli_args({**REQUIRED_ARGS, **args})

        out = run(command)
        # cfn stack events are not displayed if next-token is passed
        expected_out = [
            {
                "logStreamName": "ip-10-0-0-102.i-0717e670ad2549e72.cfn-init",
                "firstEventTimestamp": to_iso_timestr(to_utc_datetime(1622802790248)),
                "lastEventTimestamp": to_iso_timestr(to_utc_datetime(1622802893126)),
            },
            {
                "logStreamName": "ip-10-0-0-102.i-0717e670ad2549e72.chef-client",
                "firstEventTimestamp": to_iso_timestr(to_utc_datetime(1622802837114)),
                "lastEventTimestamp": to_iso_timestr(to_utc_datetime(1622802861226)),
            },
        ]
        assert_that(out["nextToken"]).is_equal_to(logs.next_token)
        for i in range(len(logs.log_streams)):
            select_keys = {"logStreamName", "firstEventTimestamp", "lastEventTimestamp"}
            out_select = {k: v for k, v in out["logStreams"][i].items() if k in select_keys}
            assert_that(out_select).is_equal_to(expected_out[i])
        assert_that(list_log_streams_mock.call_args).is_length(2)

        # verify arguments
        kwargs = {"next_token": None}
        kwargs.update(args)
        list_log_streams_mock.assert_called_with(**kwargs)
 def convert_event(event):
     event = {k[0].lower() + k[1:]: v for k, v in event.items()}
     event["timestamp"] = to_iso_timestr(to_utc_datetime(
         event["timestamp"]))
     return StackEvent.from_dict(event)
 def convert_log_event(event):
     del event["ingestionTime"]
     event["timestamp"] = to_iso_timestr(to_utc_datetime(
         event["timestamp"]))
     return LogEvent.from_dict(event)
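Both converters above normalize raw AWS event dicts before handing them to the generated models: `convert_event` lowercases the first character of each CloudFormation stack-event key so it matches the camelCase model fields, and `convert_log_event` drops `ingestionTime` and ISO-formats the timestamp. A small standalone sketch of the key normalization, with illustrative input:

def lower_first_key(event: dict) -> dict:
    """Lowercase the first character of every key, e.g. 'LogicalResourceId' -> 'logicalResourceId'."""
    return {k[0].lower() + k[1:]: v for k, v in event.items()}


print(lower_first_key({"Timestamp": "2021-06-04T10:33:10.248Z", "ResourceStatus": "CREATE_COMPLETE"}))
# {'timestamp': '2021-06-04T10:33:10.248Z', 'resourceStatus': 'CREATE_COMPLETE'}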
Example #24
def test_datetime_to_epoch(set_tz, time_isoformat, time_zone, expect_output):
    set_tz(time_zone)
    time.tzset()
    datetime_ = utils.to_utc_datetime(time_isoformat)
    assert_that(utils.datetime_to_epoch(datetime_)).is_equal_to(expect_output)
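`test_datetime_to_epoch` above drives `utils.to_utc_datetime` followed by `utils.datetime_to_epoch` under a chosen process time zone. Below is a minimal standard-library sketch of that conversion for an offset-qualified timestamp, assuming the epoch value is expressed in milliseconds (the unit used by the CloudWatch timestamps elsewhere in these examples):

from datetime import datetime, timezone


def datetime_to_epoch_millis(aware_dt: datetime) -> int:
    """Convert an aware datetime to milliseconds since the Unix epoch."""
    return int(aware_dt.timestamp() * 1000)


# Normalize an offset-qualified ISO 8601 string to UTC, then take the epoch value.
dt = datetime.fromisoformat("2021-06-04T11:33:10+01:00").astimezone(timezone.utc)
print(dt)                            # 2021-06-04 10:33:10+00:00
print(datetime_to_epoch_millis(dt))  # 1622802790000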