Example #1
0
    def bucket(self):
        """Return the artifacts S3 bucket, resolving and caching it lazily.

        The bucket is initialized on first access and memoized; the custom
        bucket name is taken from the config (create command) or from the
        stack (delete/update commands).
        """
        if not self.__bucket:
            if self.__source_config_text:
                # create command: custom bucket comes straight from the config
                custom_bucket = self.config.custom_s3_bucket
            else:
                # delete/update commands: custom bucket is read from the stack
                custom_bucket = self.stack.s3_bucket_name
                default_bucket = S3Bucket.get_bucket_name(
                    AWSApi.instance().sts.get_account_id(), get_region())
                if custom_bucket == default_bucket:
                    # the stack uses the default bucket, so there is no custom one
                    custom_bucket = None

            try:
                self.__bucket = S3BucketFactory.init_s3_bucket(
                    service_name=self.name,
                    stack_name=self.stack_name,
                    custom_s3_bucket=custom_bucket,
                    artifact_directory=self.s3_artifacts_dir,
                )
            except AWSClientError as e:
                raise _cluster_error_mapper(
                    e, f"Unable to initialize s3 bucket. {e}")

        return self.__bucket
Example #2
0
def get_templates_bucket_path():
    """Return the S3 URL prefix holding this version's CloudFormation templates."""
    region = get_region()
    # China regions use the ".amazonaws.com.cn" domain suffix.
    suffix = ".cn" if region.startswith("cn") else ""
    base = f"https://{region}-aws-parallelcluster.s3.{region}.amazonaws.com{suffix}"
    return f"{base}/parallelcluster/{get_installed_version()}/templates/"
Example #3
0
 def _validate(self, auto_import_policy, import_path):
     """Register a failure when FSx auto-import targets a bucket in another region."""
     if auto_import_policy is None:
         return
     bucket = get_bucket_name_from_s3_url(import_path)
     bucket_region = AWSApi.instance().s3.get_bucket_region(bucket)
     if bucket_region != get_region():
         self._add_failure(
             "FSx auto import is not supported for cross-region buckets.",
             FailureLevel.ERROR)
Example #4
0
    def __init__(self,
                 resource_id,
                 log_group_name,
                 bucket,
                 output_dir,
                 bucket_prefix=None,
                 keep_s3_objects=False):
        """Set up a log exporter after validating the destination bucket.

        Raises LogsExporterError when the bucket is not in the same region
        as the resource whose logs are being exported.
        """
        bucket_region = AWSApi.instance().s3.get_bucket_region(
            bucket_name=bucket)
        if bucket_region != get_region():
            raise LogsExporterError(
                f"The bucket used for exporting logs must be in the same region as the {resource_id}. "
                f"The given resource is in {get_region()}, but the bucket's region is {bucket_region}."
            )

        self.bucket = bucket
        self.log_group_name = log_group_name
        self.output_dir = output_dir
        self.keep_s3_objects = keep_s3_objects

        if bucket_prefix:
            # Caller-supplied prefix: never delete anything under it.
            self.bucket_prefix = bucket_prefix
            self.delete_everything_under_prefix = False
        else:
            # If the default bucket prefix is being used and there's nothing underneath that prefix already
            # then we can delete everything under that prefix after downloading the data
            # (unless keep-s3-objects is specified)
            timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M")
            self.bucket_prefix = f"{resource_id}-logs-{timestamp}"
            self.delete_everything_under_prefix = AWSApi.instance(
            ).s3_resource.is_empty(bucket, self.bucket_prefix)
 def _get_log_group_arn(self):
     """Return the ARN of the image builder's CloudWatch log group."""
     recipe_name = self._build_image_recipe_name()
     return self.format_arn(
         service="logs",
         resource="log-group",
         region=get_region(),
         sep=":",
         resource_name=f"/aws/imagebuilder/{recipe_name}",
     )
Example #6
0
 def __init__(self, stack: ClusterStack):
     """Populate cluster and stack attributes from the CloudFormation stack."""
     # Cluster-level attributes
     self.name = stack.cluster_name
     self.region = get_region()
     self.version = stack.version
     self.scheduler = stack.scheduler
     # FIXME cluster status should be different from stack status
     self.status = stack.status
     # Stack-level attributes
     self.stack_arn = stack.id
     self.stack_name = stack.name
     self.stack_status = stack.status
     self.stack_outputs = stack.outputs
Example #7
0
 def describe_image_by_imagebuilder_arn_tag(self, image_id: str):
     """Return a dict of image info by searching imagebuilder arn tag."""
     # Image Builder names are capped at 1024 chars and lower-cased.
     name = f"{IMAGEBUILDER_RESOURCE_NAME_PREFIX}-{image_id}"[:1024].lower()
     arn_pattern = (
         f"arn:{get_partition()}:imagebuilder:{get_region()}:*:image/{name}*"
     )
     filters = [
         {
             "Name": "tag:" + IMAGEBUILDER_ARN_TAG,
             "Values": [arn_pattern],
         }
     ]
     return self.describe_images(ami_ids=[], filters=filters, owners=["self"])[0]
Example #8
0
def _get_keys():
    """Return the names of the EC2 key pairs available in the current region.

    Prints a pointer to the key-pair creation guide when no key pair exists.

    :return: list of key-pair names (possibly empty).
    """
    keypairs = boto3.client("ec2").describe_key_pairs()
    # Default to an empty list so a missing "KeyPairs" field yields [] instead
    # of raising TypeError when iterating None.
    key_options = [key.get("KeyName") for key in keypairs.get("KeyPairs", [])]

    if not key_options:
        print(
            "No KeyPair found in region {0}, please create one following the guide: "
            "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html"
            .format(get_region()))

    return key_options
Example #9
0
def _ssh(args, extra_args):
    # pylint: disable=import-outside-toplevel
    """
    Execute an SSH command to the head node instance, according to the [aliases] section if there.

    :param args: pcluster CLI args
    :param extra_args: pcluster CLI extra_args
    """
    try:
        try:
            from shlex import quote as cmd_quote
        except ImportError:
            from pipes import quote as cmd_quote

        result = PclusterApi().describe_cluster_instances(
            cluster_name=args.cluster_name,
            region=get_region(),
            node_type=NodeType.HEAD_NODE)
        if isinstance(result, list) and len(result) == 1:
            # build command; prefer the public IP, fall back to the private one
            cmd = "ssh {CFN_USER}@{HEAD_NODE_IP} {ARGS}".format(
                CFN_USER=result[0].user,
                HEAD_NODE_IP=result[0].public_ip_address
                or result[0].private_ip_address,
                ARGS=" ".join(cmd_quote(str(arg)) for arg in extra_args),
            )

            # run command
            log_message = "SSH command: {0}".format(cmd)
            if not args.dryrun:
                LOGGER.debug(log_message)
                # A nosec comment is appended to the following line in order to disable the B605 check.
                # This check is disabled for the following reasons:
                # - The args passed to the remote command are sanitized.
                # - The default command to which these args is known.
                # - Users have full control over any customization of the command to which args are passed.
                os.system(cmd)  # nosec nosemgrep
            else:
                print(log_message)
        else:
            # BUGFIX: "result" may be an API failure object (with a .message)
            # or a list of unexpected length; using getattr keeps the error
            # report from raising AttributeError on a list.
            message = getattr(result, "message", result)
            utils.error(
                f"Unable to connect to the cluster {args.cluster_name}.\n{message}"
            )

    except KeyboardInterrupt:
        print("\nExiting...")
        sys.exit(0)
Example #10
0
def _dcv_connect(args):
    """
    Execute pcluster dcv connect command.

    :param args: pcluster cli arguments.
    """
    result = PclusterApi().describe_cluster_instances(
        cluster_name=args.cluster_name,
        region=get_region(),
        node_type=NodeType.HEAD_NODE)
    if isinstance(result, list) and len(result) == 1:
        head_node_ip = result[0].public_ip_address or result[
            0].private_ip_address
        # Prepare ssh command to execute in the head node instance
        cmd = 'ssh {CFN_USER}@{HEAD_NODE_IP} {KEY} "{REMOTE_COMMAND} /home/{CFN_USER}"'.format(
            CFN_USER=result[0].user,
            HEAD_NODE_IP=head_node_ip,
            KEY="-i {0}".format(args.key_path) if args.key_path else "",
            REMOTE_COMMAND=DCV_CONNECT_SCRIPT,
        )

        try:
            url = _retry(_retrieve_dcv_session_url,
                         func_args=[cmd, args.cluster_name, head_node_ip],
                         attempts=4)
            url_message = "Please use the following one-time URL in your browser within 30 seconds:\n{0}".format(
                url)

            if args.show_url:
                print(url_message)
                return

            try:
                if not webbrowser.open_new(url):
                    raise webbrowser.Error("Unable to open the Web browser.")
            except webbrowser.Error as e:
                # BUGFIX: print() does not do logging-style "%s" substitution;
                # the original call printed the literal format string plus args.
                print(f"{e}\n{url_message}")

        except DCVConnectionError as e:
            error(
                "Something went wrong during DCV connection.\n{0}"
                "Please check the logs in the /var/log/parallelcluster/ folder "
                "of the head node and submit an issue {1}\n".format(
                    e, PCLUSTER_ISSUES_LINK))
    else:
        # BUGFIX: guard the .message access — "result" may be a list of
        # unexpected length, which has no .message attribute.
        error(f"Unable to connect to the cluster.\n{getattr(result, 'message', result)}")
Example #11
0
def automate_vpc_with_subnet_creation(network_configuration,
                                      compute_subnet_size):
    """Create a new VPC plus its subnets and return the resulting parameters."""
    print(
        "Beginning VPC creation. Please do not leave the terminal until the creation is finalized"
    )
    factory = VpcFactory(get_region())
    new_vpc_id = factory.create()
    factory.setup(new_vpc_id, name="ParallelClusterVPC" + TIMESTAMP)
    if not factory.check(new_vpc_id):
        # The VPC exists but is misconfigured; bail out and let the user clean up.
        logging.critical(
            "Something went wrong in VPC creation. Please delete it and start the process again"
        )
        sys.exit(1)

    parameters = {"vpc_id": new_vpc_id}
    parameters.update(
        automate_subnet_creation(new_vpc_id, network_configuration,
                                 compute_subnet_size))
    return parameters
Example #12
0
    def _register_validators(self):
        """Register volume-size and custom-bucket validators for the image config."""
        # Volume size validator only validates specified volume size
        root_volume = self.image.root_volume if self.image else None
        if root_volume and root_volume.size:
            self._register_validator(
                EbsVolumeTypeSizeValidator,
                volume_type=ROOT_VOLUME_TYPE,
                volume_size=root_volume.size,
            )
            self._register_validator(
                AMIVolumeSizeValidator,
                volume_size=root_volume.size,
                image=self.build.parent_image,
            )

        if self.custom_s3_bucket:
            # Check both that the bucket exists and that it is in the current region.
            self._register_validator(
                S3BucketValidator, bucket=self.custom_s3_bucket)
            self._register_validator(
                S3BucketRegionValidator,
                bucket=self.custom_s3_bucket,
                region=get_region())
Example #13
0
def get_templates_bucket_path():
    """Return a string containing the path of bucket."""
    region = get_region()
    # China regions use the ".amazonaws.com.cn" domain suffix.
    suffix = ".cn" if region.startswith("cn") else ""
    return f"https://{region}-aws-parallelcluster.s3.{region}.amazonaws.com{suffix}/templates/"
    def _add_lambda_cleanup(self, policy_statements, build_tags):
        """Build the stack-cleanup Lambda and its supporting resources.

        When no custom cleanup role is provided, an execution role is created
        whose policy allows deleting every resource this stack creates
        (stack, AMI tags, IAM roles/policies, the Lambda itself, its log
        group, the instance profile, and the SNS topic).

        :param policy_statements: list collecting IAM policy statements to
            attach to the generated cleanup role.
        :param build_tags: tags applied to the created role and function.
        :return: tuple (lambda function, SNS invoke permission,
            created execution role or None, log group).
        """
        lambda_cleanup_execution_role = None
        if self.custom_cleanup_lambda_role:
            # Caller supplied a role ARN; no role resource is created.
            execution_role = self.custom_cleanup_lambda_role
        else:
            # LambdaCleanupPolicies
            # Allow deleting the image stack itself.
            self._add_resource_delete_policy(
                policy_statements,
                ["cloudformation:DeleteStack"],
                [
                    self.format_arn(
                        service="cloudformation",
                        resource="stack",
                        resource_name="{0}/{1}".format(
                            self.image_id, self._stack_unique_id()),
                    )
                ],
            )

            # Allow tagging the produced AMIs in every distribution region.
            self._add_resource_delete_policy(
                policy_statements,
                ["ec2:CreateTags"],
                [
                    self.format_arn(
                        service="ec2",
                        account="",
                        resource="image",
                        region=region,
                        resource_name="*",
                    ) for region in self._get_distribution_regions()
                ],
            )

            self._add_resource_delete_policy(
                policy_statements,
                ["tag:TagResources"],
                ["*"],
            )

            # Allow tearing down the cleanup role itself.
            self._add_resource_delete_policy(
                policy_statements,
                [
                    "iam:DetachRolePolicy", "iam:DeleteRole",
                    "iam:DeleteRolePolicy"
                ],
                [
                    self.format_arn(
                        service="iam",
                        resource="role",
                        region="",
                        resource_name="{0}/{1}".format(
                            IAM_ROLE_PATH.strip("/"),
                            self._build_resource_name(
                                IMAGEBUILDER_RESOURCE_NAME_PREFIX + "Cleanup"),
                        ),
                    )
                ],
            )

            # Allow the Lambda to delete itself and its invoke permission.
            self._add_resource_delete_policy(
                policy_statements,
                ["lambda:DeleteFunction", "lambda:RemovePermission"],
                [
                    self.format_arn(
                        service="lambda",
                        resource="function",
                        sep=":",
                        resource_name=self._build_resource_name(
                            IMAGEBUILDER_RESOURCE_NAME_PREFIX),
                    )
                ],
            )

            # Allow removing the Lambda's CloudWatch log group.
            self._add_resource_delete_policy(
                policy_statements,
                ["logs:DeleteLogGroup"],
                [
                    self.format_arn(
                        service="logs",
                        resource="log-group",
                        sep=":",
                        resource_name="/aws/lambda/{0}:*".format(
                            self._build_resource_name(
                                IMAGEBUILDER_RESOURCE_NAME_PREFIX)),
                    )
                ],
            )

            # Allow detaching the build role from its instance profile.
            self._add_resource_delete_policy(
                policy_statements,
                ["iam:RemoveRoleFromInstanceProfile"],
                [
                    self.format_arn(
                        service="iam",
                        resource="instance-profile",
                        region="",
                        resource_name="{0}/{1}".format(
                            IAM_ROLE_PATH.strip("/"),
                            self._build_resource_name(
                                IMAGEBUILDER_RESOURCE_NAME_PREFIX),
                        ),
                    )
                ],
            )

            # Allow cleaning up the build role's attached/inline policies.
            self._add_resource_delete_policy(
                policy_statements,
                ["iam:DetachRolePolicy", "iam:DeleteRolePolicy"],
                [
                    self.format_arn(
                        service="iam",
                        resource="role",
                        region="",
                        resource_name="{0}/{1}".format(
                            IAM_ROLE_PATH.strip("/"),
                            self._build_resource_name(
                                IMAGEBUILDER_RESOURCE_NAME_PREFIX),
                        ),
                    )
                ],
            )

            # Allow deleting the build-notification SNS topic.
            self._add_resource_delete_policy(
                policy_statements,
                [
                    "SNS:GetTopicAttributes", "SNS:DeleteTopic",
                    "SNS:Unsubscribe"
                ],
                [
                    self.format_arn(
                        service="sns",
                        resource="{0}".format(
                            self._build_resource_name(
                                IMAGEBUILDER_RESOURCE_NAME_PREFIX)),
                    )
                ],
            )

            policy_document = iam.PolicyDocument(statements=policy_statements)
            managed_lambda_policy = [
                Fn.sub(
                    "arn:${AWS::Partition}:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
                ),
            ]

            # LambdaCleanupExecutionRole
            lambda_cleanup_execution_role = iam.CfnRole(
                self,
                "DeleteStackFunctionExecutionRole",
                managed_policy_arns=managed_lambda_policy,
                assume_role_policy_document=get_assume_role_policy_document(
                    "lambda.amazonaws.com"),
                path=IAM_ROLE_PATH,
                policies=[
                    iam.CfnRole.PolicyProperty(
                        policy_document=policy_document,
                        policy_name="LambdaCleanupPolicy",
                    ),
                ],
                tags=build_tags,
                role_name=self._build_resource_name(
                    IMAGEBUILDER_RESOURCE_NAME_PREFIX + "Cleanup"),
            )

            execution_role = lambda_cleanup_execution_role.attr_arn

        # LambdaCleanupEnv
        # The function receives the stack ARN so it knows what to delete.
        lambda_env = awslambda.CfnFunction.EnvironmentProperty(
            variables={"IMAGE_STACK_ARN": self.stack_id})

        # LambdaCWLogGroup
        lambda_log = logs.CfnLogGroup(
            self,
            "DeleteStackFunctionLog",
            log_group_name="/aws/lambda/{0}".format(
                self._build_resource_name(IMAGEBUILDER_RESOURCE_NAME_PREFIX)),
        )

        # LambdaCleanupFunction
        # Code is fetched from the custom bucket if configured, otherwise from
        # the default per-account/per-region ParallelCluster bucket.
        lambda_cleanup = awslambda.CfnFunction(
            self,
            "DeleteStackFunction",
            function_name=self._build_resource_name(
                IMAGEBUILDER_RESOURCE_NAME_PREFIX),
            code=awslambda.CfnFunction.CodeProperty(
                s3_bucket=self.config.custom_s3_bucket
                or S3Bucket.get_bucket_name(
                    AWSApi.instance().sts.get_account_id(), get_region()),
                s3_key=self.bucket.get_object_key(S3FileType.CUSTOM_RESOURCES,
                                                  "artifacts.zip"),
            ),
            handler="delete_image_stack.handler",
            memory_size=128,
            role=execution_role,
            runtime="python3.8",
            timeout=900,
            environment=lambda_env,
            tags=build_tags,
        )
        # Let the build-notification SNS topic invoke the cleanup function.
        permission = awslambda.CfnPermission(
            self,
            "DeleteStackFunctionPermission",
            action="lambda:InvokeFunction",
            principal="sns.amazonaws.com",
            function_name=lambda_cleanup.attr_arn,
            source_arn=Fn.ref("BuildNotificationTopic"),
        )
        # Ensure the log group exists before the function starts logging.
        lambda_cleanup.add_depends_on(lambda_log)

        return lambda_cleanup, permission, lambda_cleanup_execution_role, lambda_log
Example #15
0
 def region(self):
     """Return bucket region, resolving it lazily from the environment."""
     region = self.__region
     if region is None:
         region = get_region()
         self.__region = region
     return region
Example #16
0
def _validate_vpc(vpc_id):
    """Log a warning when the given VPC fails the basic configuration check."""
    # This function should be further expandend once we decide to allow the user to use his vpcs. For example, we should
    # also check for the presence of a NAT gateway
    checker = VpcFactory(get_region())
    if not checker.check(vpc_id):
        logging.error(
            "WARNING: The VPC does not have the correct parameters set.")
Example #17
0
def replace_url_parameters(url):
    """Replace ${Region} and ${URLSuffix} in url."""
    substitutions = {
        "${Region}": get_region(),
        "${URLSuffix}": get_url_domain_suffix(),
    }
    for placeholder, value in substitutions.items():
        url = url.replace(placeholder, value)
    return url
Example #18
0
def get_partition():
    """Get partition for the region set in the environment."""
    region = get_region()
    for prefix in ("us-gov", "cn"):
        if region.startswith(prefix):
            return "aws-" + prefix
    # Commercial regions fall into the default partition.
    return "aws"
Example #19
0
 def __init__(self, imagebuilder: ImageBuilder):
     """Capture initial image-build state for the given image builder."""
     # Neither the stack nor the image exists until the build runs.
     self.stack_exist = False
     self.image_exist = False
     self.region = get_region()
     # URL of the image configuration file
     self.image_configuration = imagebuilder.config_url
Example #20
0
 def _get_log_group_arn(self):
     """Get log group arn."""
     account_id = AWSApi.instance().sts.get_account_id()
     return (
         f"arn:{get_partition()}:logs:{get_region()}:"
         f"{account_id}:log-group:{self._log_group_name}"
     )
Example #21
0
    def _get_custom_bucket(self):
        """Try to get custom bucket name from image tag or stack tag."""
        bucket_name = None
        try:
            bucket_name = self.image.s3_bucket_name
        except ImageError as e:
            # A missing image is fine — fall back to the stack tag below.
            if not isinstance(e, NonExistingImageError):
                raise _imagebuilder_error_mapper(e, f"Unable to get S3 bucket name from image {self.image_id}. {e}")

        if bucket_name is None:
            try:
                bucket_name = self.stack.s3_bucket_name
            except StackError as e:
                raise _imagebuilder_error_mapper(e, f"Unable to get S3 bucket name from image {self.image_id}. {e}")

        default_bucket = S3Bucket.get_bucket_name(
            AWSApi.instance().sts.get_account_id(), get_region())
        # The default per-account bucket does not count as "custom".
        return None if bucket_name == default_bucket else bucket_name