def create_alias(self, scope: Construct, subdomain_name):
    """Create IPv4 and IPv6 alias records for *subdomain_name*.

    Both records are created under *scope* and resolve, via a DomainAlias
    target, to this stack's own subdomain (``self.subdomain_name``).
    """
    ARecord(
        scope,
        f"{subdomain_name}ARecord",
        record_name=dns.subdomain_to_fqdn(subdomain_name),
        zone=dns.get_hosted_zone(),
        target=RecordTarget.from_alias(DomainAlias(self.subdomain_name)),
    )
    AaaaRecord(
        scope,
        f"{subdomain_name}AAAARecord",
        record_name=dns.subdomain_to_fqdn(subdomain_name),
        zone=dns.get_hosted_zone(),
        target=RecordTarget.from_alias(DomainAlias(self.subdomain_name)),
    )
def add_dns_records(self, zone: IHostedZone, resource_name: str) -> None:
    """Register an A record in *zone* pointing at this jump box's private IP.

    The record name is ``<resource_name>.<zone name>``.
    """
    record_fqdn = f"{resource_name}.{zone.zone_name}"
    r53.ARecord(
        self,
        'DnsRecord',
        zone=zone,
        comment=f"Name Record for {JumpBoxConstruct.__name__}",
        record_name=record_fqdn,
        target=RecordTarget(values=[self.instance.instance_private_ip]),
    )
def create_route53(stack, domain, distrubution):
    """Look up the hosted zone for *domain* and point A/AAAA alias records
    at the given CloudFront distribution.

    Note: the *distrubution* parameter keeps its (misspelled) name so that
    existing keyword callers are not broken.
    """
    hosted_zone = HostedZone.from_lookup(
        stack,
        'RadiantLoungeZone',
        domain_name=domain,
    )
    # One IPv4 and one IPv6 record, otherwise identical.
    for record_cls, record_id in (
        (ARecord, 'RadiantLoungeARecord'),
        (AaaaRecord, 'RadiantLoungeAaaaRecord'),
    ):
        record_cls(
            stack,
            record_id,
            zone=hosted_zone,
            target=RecordTarget.from_alias(CloudFrontTarget(distrubution)),
        )
def __init__(self, scope: core.Construct, id: str, hosted_zone: IHostedZone,
             domain_name: str, **kwargs) -> None:
    """Static-website stack: public S3 bucket behind CloudFront, a
    DNS-validated wildcard certificate, and an apex alias record.

    :param hosted_zone: existing zone that receives validation and alias records.
    :param domain_name: apex domain; the certificate also covers ``*.domain_name``.
    """
    super().__init__(scope, id, **kwargs)

    # Wildcard cert validated via DNS records in the zone; the bare apex
    # is included as a SAN.
    certificate = DnsValidatedCertificate(self, 'Certificate',
                                          domain_name=f'*.{domain_name}',
                                          subject_alternative_names=[domain_name],
                                          hosted_zone=hosted_zone)

    # Public website bucket; DESTROY means the bucket goes away with the
    # stack (contents are redeployed from ./site/public below).
    bucket = Bucket(self, 'SiteBucket',
                    bucket_name=domain_name,
                    website_index_document='index.html',
                    public_read_access=True,
                    removal_policy=core.RemovalPolicy.DESTROY)

    # CloudFront fronts the bucket's *website endpoint*, which only speaks
    # HTTP — hence a custom origin rather than an S3 origin.
    cloudfront_distribution = CloudFrontWebDistribution(
        self, 'CloudFrontDistribution',
        origin_configs=[
            SourceConfiguration(
                custom_origin_source=CustomOriginConfig(
                    domain_name=bucket.bucket_website_domain_name,
                    origin_protocol_policy=OriginProtocolPolicy.HTTP_ONLY,
                ),
                behaviors=[
                    Behavior(is_default_behavior=True,
                             default_ttl=core.Duration.hours(1))
                ],
            ),
        ],
        alias_configuration=AliasConfiguration(
            acm_cert_ref=certificate.certificate_arn,
            names=[domain_name],
        )
    )

    # Apex A record aliased to the distribution.
    # NOTE(review): Route53 ignores TTL on alias records — confirm whether
    # the ttl argument here has any effect or can be dropped.
    ARecord(self, 'DefaultRecord',
            target=RecordTarget(alias_target=CloudFrontTarget(
                distribution=cloudfront_distribution)),
            zone=hosted_zone,
            ttl=core.Duration.hours(1))

    # Upload site content; passing the distribution triggers an
    # invalidation on deploy.
    BucketDeployment(self, 'DeployWebsite',
                     sources=[Source.asset('./site/public')],
                     destination_bucket=bucket,
                     distribution=cloudfront_distribution)

    # Console-friendly outputs.
    core.CfnOutput(self, 'CloudFrontDomain',
                   value=cloudfront_distribution.distribution_domain_name)
    core.CfnOutput(self, 'BucketName', value=bucket.bucket_name)
def __init__(self, scope: Construct, id: str, *, fqdn: str, target) -> None:
    """Create an alias A record for *fqdn* inside the configured hosted zone.

    :param fqdn: fully-qualified name; must be the hosted zone apex or a
        name ending in ``"." + <zone name>``.
    :param target: alias target, wrapped with ``RecordTarget.from_alias``.
    :raises Exception: if *fqdn* is not within the hosted zone.
    """
    super().__init__(scope, id)

    hosted_zone_name = dns.get_hosted_zone_name()
    # Require a label boundary: a bare endswith() check would wrongly
    # accept e.g. "notexample.com" for zone "example.com" and then slice
    # a garbage record name below.
    if fqdn != hosted_zone_name and not fqdn.endswith(f".{hosted_zone_name}"):
        raise Exception(f"FQDN {fqdn} not within {hosted_zone_name}")

    # Strip the zone suffix plus the joining dot; the apex yields an
    # empty record name (matching the original slicing behavior).
    if fqdn == hosted_zone_name:
        record_name = ""
    else:
        record_name = fqdn[0:-len(hosted_zone_name) - 1]

    route53.ARecord(
        self,
        id,
        target=RecordTarget.from_alias(target),
        zone=dns.get_hosted_zone(),
        record_name=record_name,
    )
def __init__(self, scope: Construct, app_id: str, **kwargs) -> None:
    """Deploy UI assets to an existing S3 bucket and wire an ALB listener
    rule plus a private Route53 alias so requests to ``host_domain`` are
    redirected (HTTPS) to the static content domain.
    """
    super().__init__(scope, app_id, **kwargs)

    # bucket with ui contents can be reached over listener rule on ALB
    api_domain_name = "static." + AWS_CONF["private_base_domain"]
    host_domain = f"{AWS_CONF['app_name']}.{AWS_CONF['private_base_domain']}"
    s3_path = AWS_CONF["app_name"]
    # Test-stage deployments get a branch-specific host name and S3 prefix.
    if AWS_CONF["deployment_stage"] == "tst":
        host_domain = f"{AWS_CONF['branch_id']}." + host_domain
        s3_path += "-" + AWS_CONF["branch_path"]
    ui_bucket = Bucket.from_bucket_name(
        self,
        "UiBucket",
        bucket_name=AWS_CONF["optionals"]["ui_bucket"],
    )
    BucketDeployment(
        self,
        "UiBucketDepl",
        destination_bucket=ui_bucket,
        destination_key_prefix=s3_path,
        sources=[Source.asset(AWS_CONF["optionals"]["node_build_path"])],
    )

    # ALB rule for http redirect to https
    load_balancer_arn = Arn.format(
        components=ArnComponents(
            service="elasticloadbalancing",
            partition="aws",
            resource="loadbalancer/app",
            resource_name=AWS_CONF["optionals"]["alb"],
        ),
        stack=self,
    )
    alb = ApplicationLoadBalancer.from_lookup(
        self,
        "AlbApi",
        load_balancer_arn=load_balancer_arn,
    )
    listener_http = ApplicationListener.from_lookup(
        self,
        "AlbHttpListenerRule",
        load_balancer_arn=alb.load_balancer_arn,
        listener_port=80,
    )

    # listener rule priority is mandatory input and needs to be looked up
    # if cdk context not set yet set fixed priority during cdk synth
    priority = 1
    # NOTE(review): presumably the account id only appears in the listener
    # ARN once the context lookup has resolved (dummy ARNs lack it), making
    # this a "context available?" test — confirm.
    if AWS_CONF["env"]["account"] in listener_http.listener_arn:
        priority = _next_elb_priority(host_domain, listener_http.listener_arn)

    # the rule is added to the existing listener
    ApplicationListenerRule(
        self,
        f"ListenerRule{AWS_CONF['branch_id'].capitalize()}",
        listener=listener_http,
        priority=priority,
        action=ListenerAction.redirect(
            host=api_domain_name,
            path=f"/ui/{s3_path}/index.html",
            permanent=True,
            port="443",
            protocol="HTTPS",
        ),
        conditions=[ListenerCondition.host_headers([host_domain])],
    )

    # route 53 private zone with listener rule for redirect to alb
    ARecord(
        self,
        f"ARecord{AWS_CONF['branch_id'].capitalize()}",
        record_name=host_domain,
        target=RecordTarget(alias_target=LoadBalancerTarget(alb)),
        zone=PrivateHostedZone.from_lookup(
            self,
            "PrivZoneCorp",
            domain_name=AWS_CONF["private_base_domain"],
            private_zone=True,
            vpc_id=AWS_CONF["optionals"]["vpc"],
        ),
    )
def __init__(self, scope: core.Construct, construct_id: str, cert_arn: str,
             hosted_zone_id: str, domain_name: str, **kwargs) -> None:
    """
    Personal-website stack: S3/CloudFront hosting with apex and www A/AAAA
    records, plus a GitHub-triggered CodeBuild deploy pipeline that sends
    SMS notifications via SNS.

    :param cert_arn: ARN of certificate to use
    :param hosted_zone_id: ID of hosted zone to use
    :param domain_name: Domain name to use
    """
    super().__init__(scope, construct_id, **kwargs)

    ##################################
    # WEBSITE HOSTING INFRASTRUCTURE #
    ##################################
    # Grab hosted zone for the website to contain our records and an SSL certificate for HTTPS. These two have to
    # be grabbed from existing resources instead of created here because CloudFormation will time out waiting for a
    # newly-created cert to validate.
    self.hosted_zone = PublicHostedZone.from_public_hosted_zone_id(
        self, "personal-site-hosted-zone", hosted_zone_id)
    self.cert = Certificate.from_certificate_arn(self, "personal-site-cert", cert_arn)

    # Add an S3 bucket to host the website content
    self.website_bucket = Bucket(self, "personal-site-bucket",
                                 bucket_name=domain_name,
                                 removal_policy=RemovalPolicy.DESTROY,
                                 public_read_access=True,
                                 website_index_document="index.html",
                                 website_error_document="index.html")

    # Create a cloudfront distribution for the site
    self.distribution = Distribution(
        self, "personal-site-cf-distribution",
        default_behavior={
            "origin": S3Origin(self.website_bucket),
            "allowed_methods": AllowedMethods.ALLOW_GET_HEAD_OPTIONS,
            "viewer_protocol_policy": ViewerProtocolPolicy.REDIRECT_TO_HTTPS
        },
        certificate=self.cert,
        minimum_protocol_version=SecurityPolicyProtocol.TLS_V1_2_2019,
        enable_ipv6=True,
        domain_names=[domain_name, f"www.{domain_name}"])

    # Point traffic to base and www.base to the cloudfront distribution, for both IPv4 and IPv6
    ARecord(self, "personal-site-a-record",
            zone=self.hosted_zone,
            record_name=f"{domain_name}.",
            target=RecordTarget.from_alias(
                CloudFrontTarget(self.distribution)))
    ARecord(self, "personal-site-a-record-www",
            zone=self.hosted_zone,
            target=RecordTarget.from_alias(
                CloudFrontTarget(self.distribution)),
            record_name=f"www.{domain_name}.")
    AaaaRecord(self, "personal-site-aaaa-record",
               zone=self.hosted_zone,
               record_name=f"{domain_name}.",
               target=RecordTarget.from_alias(
                   CloudFrontTarget(self.distribution)))
    AaaaRecord(self, "personal-site-aaaa-record-www",
               zone=self.hosted_zone,
               target=RecordTarget.from_alias(
                   CloudFrontTarget(self.distribution)),
               record_name=f"www.{domain_name}.")

    #############################
    # WEBSITE CD INFRASTRUCTURE #
    #############################
    # CodeBuild project to build the website
    self.code_build_project = \
        Project(self, "personal-site-builder",
                project_name="PersonalWebsite",
                description="Builds & deploys a personal static website on changes from GitHub",
                source=Source.git_hub(
                    owner="c7c8",
                    repo="crmyers.dev",
                    clone_depth=1,
                    branch_or_ref="master",
                    webhook_filters=[
                        FilterGroup.in_event_of(EventAction.PUSH, EventAction.PULL_REQUEST_MERGED).and_branch_is(
                            "master")]),
                artifacts=Artifacts.s3(bucket=self.website_bucket,
                                       include_build_id=False,
                                       package_zip=False,
                                       path="/"),
                build_spec=BuildSpec.from_object_to_yaml({
                    "version": "0.2",
                    "phases": {
                        "install": {
                            "runtime-versions": {
                                "nodejs": 10,
                            }
                        },
                        "pre_build": {
                            "commands": ["npm install"]
                        },
                        "build": {
                            "commands": [
                                # NOTE(review): trailing '&&' looks like the two
                                # commands were meant to be one shell line —
                                # confirm this buildspec runs as intended.
                                "npm run-script build &&",
                                f"aws cloudfront create-invalidation --distribution-id={self.distribution.distribution_id} --paths '/*'"
                            ]
                        }
                    },
                    "artifacts": {
                        "files": ["./*"],
                        "name": ".",
                        "discard-paths": "no",
                        "base-directory": "dist/crmyers-dev"
                    }
                }))
    # Allow the build role to invalidate the CloudFront cache after deploy.
    self.code_build_project.role.add_to_policy(
        PolicyStatement(
            effect=Effect.ALLOW,
            resources=[
                f"arn:aws:cloudfront::{self.account}:distribution/{self.distribution.distribution_id}"
            ],
            actions=['cloudfront:CreateInvalidation']))

    # Set up an SNS topic for text message notifications
    self.deployment_topic = Topic(self, 'personal-site-deployment-topic',
                                  topic_name='WebsiteDeployments',
                                  display_name='Website Deployments')
    self.deployment_topic.add_subscription(SmsSubscription("+19255968684"))
    # Notify on both build outcomes.
    self.code_build_project.on_build_failed(
        "BuildFailed",
        target=targets.SnsTopic(self.deployment_topic,
                                message=RuleTargetInput.from_text(
                                    "Build for crmyers.dev FAILED")))
    self.code_build_project.on_build_succeeded(
        "BuildSucceeded",
        target=targets.SnsTopic(self.deployment_topic,
                                message=RuleTargetInput.from_text(
                                    "Build for crmyers.dev SUCCEEDED")))
def __init__(self, app: App, id: str, env: Environment) -> None: super().__init__(app, id, env=env) # start by getting the DNS zone we're going to work with zone = HostedZone.from_lookup(self, "Dominick", domain_name=DOMAIN) # create a certificate for the web service which matches its hostname cert = Certificate(self, "Cletus", domain_name=HOSTNAME, validation=CertificateValidation.from_dns(zone)) # the services will live in a vpc, of course vpc = ec2.Vpc(self, "Virgil") # we're going to scale this web-service automatically asg = AutoScalingGroup( self, "Alice", vpc=vpc, user_data=http_service(), instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO), machine_image=ec2.AmazonLinuxImage( generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2)) # explicitly allow internal access from the vpc just to be safe asg.connections.allow_internally(Port.tcp(WEB_PORT), "web-service") asg.connections.allow_internally(Port.tcp(NOT_WEB), "not-web") # expose the scaling group ports and permit egress asg.connections.allow_from_any_ipv4(Port.tcp(WEB_PORT)) asg.connections.allow_from_any_ipv4(Port.tcp(NOT_WEB)) # create a health check for the not-web service that currently if NOT_WEB_HEALTH_CHECKS: # points to the not-web service checker = HealthCheck(interval=Duration.seconds(10), port=NOT_WEB, protocol=Protocol.TCP) else: # points to the web port where our demo server listens checker = HealthCheck(interval=Duration.seconds(10), port=str(WEB_PORT), protocol=WEB_PROT) # put the scaling group behind a network target group for the LB notwebish = NetworkTargetGroup(self, "Allison", vpc=vpc, health_check=checker, targets=[asg], port=NOT_WEB, protocol=Protocol.TCP) # for the web-like ports, we can use the default health check webish = NetworkTargetGroup( self, "Alicen", vpc=vpc, health_check=HealthCheck(interval=Duration.seconds(10)), targets=[asg], port=WEB_PORT, protocol=WEB_PROT) if True: # create the load balancer and put it into dns lb = 
NetworkLoadBalancer(self, "Lisa", vpc=vpc, internet_facing=True) # create a hostname for the service CnameRecord(self, "Carl", domain_name=lb.load_balancer_dns_name, zone=zone, record_name=HOSTNAME.split('.')[0], ttl=Duration.seconds(60)) else: # a multi-step deployment could allow using an alias in R53 lb = NetworkLoadBalancer.from_network_load_balancer_attributes( self, "Larry", vpc=vpc, load_balancer_arn=some.load_balancer_arn, load_balancer_dns_name=HOSTNAME, load_balancer_canonical_hosted_zone_id=zone.hosted_zone_id) # create a hostname for the service AaaaRecord(self, "Eric", zone=zone, record_name=HOSTNAME.split('.')[0], target=RecordTarget.from_alias(LoadBalancerTarget(lb))) # point the load balancer to the target group for the ssl service # # TODO: determine if we need to use the same cert for pub-facing # and internal service listener_cert = ListenerCertificate(cert.certificate_arn) lb.add_listener("Cecil", port=443, certificates=[listener_cert], default_target_groups=[webish]) # point the load balancer to the target group for the web service lb.add_listener("Webster", port=80, default_target_groups=[webish]) # point the load balancer to the group for the not-web service lb.add_listener("NotWeb", default_target_groups=[notwebish], port=NOT_WEB, protocol=Protocol.TCP) # auto scale the, uh, autoscaling group asg.scale_on_cpu_utilization("ScaleCPU", target_utilization_percent=80) # emit some output values, largely for console use CfnOutput(self, "LB", export_name="LB", value=lb.load_balancer_dns_name) CfnOutput(self, "HTTP", export_name="HTTP", value="http://{}/".format(HOSTNAME)) CfnOutput(self, "HTTPS", export_name="HTTPS", value="https://{}/".format(HOSTNAME)) CfnOutput(self, "TCP", export_name="TCP", value="tcp://{}:{}/".format(HOSTNAME, NOT_WEB)) CfnOutput(self, "Cert", export_name="Cert", value=cert.certificate_arn)
def __init__(
    self,
    scope: Construct,
    id: str,
    cluster: ICluster,
    ecs_security_group: SecurityGroup,
    ecs_source_security_group: SecurityGroup,
    vpc: IVpc,
    **kwargs,
) -> None:
    """NLB proxy stack: an autoscaled EC2 group bootstrapped via user-data
    (nginx + SOCKS proxy), registered behind an HTTPS listener, with DNS
    records in both the public and private zones. Module-level singleton —
    only one instance may exist (enforced via ``g_nlb``).
    """
    super().__init__(scope, id, **kwargs)
    global g_nlb

    Tags.of(self).add("Stack", "Common-Nlb")

    # TODO -- You need to do some manual actions:
    # TODO -- 1) enable auto-assign IPv6 address on public subnets
    # TODO -- 2) add to the Outbound rules of "Live-Common-Nlb/ASG/InstanceSecurityGroup" the destination "::/0"

    self.private_zone = HostedZone.from_lookup(
        self,
        "PrivateZone",
        domain_name="openttd.internal",
        private_zone=True,
    )

    # User-data: pull the packaged /nlb assets from S3 and configure the host.
    user_data = UserData.for_linux(shebang="#!/bin/bash -ex")
    asset = Asset(self, "NLB", path="user_data/nlb/")
    user_data.add_commands(
        "echo 'Extracting user-data files'",
        "mkdir /nlb",
        "cd /nlb",
    )
    user_data.add_s3_download_command(
        bucket=asset.bucket,
        bucket_key=asset.s3_object_key,
        local_file="/nlb/files.zip",
    )
    user_data.add_commands("unzip files.zip", )
    # Make region/cluster discoverable to the on-host scripts.
    user_data.add_commands(
        "echo 'Setting up configuration'",
        f"echo '{self.region}' > /etc/.region",
        f"echo '{cluster.cluster_name}' > /etc/.cluster",
    )
    user_data.add_commands(
        "echo 'Installing nginx'",
        "amazon-linux-extras install epel",
        "yum install nginx -y",
        "cp /nlb/nginx.conf /etc/nginx/nginx.conf",
        "mkdir /etc/nginx/nlb.d",
    )
    user_data.add_commands(
        "echo 'Installing Python3'",
        "yum install python3 -y",
        "python3 -m venv /venv",
        "/venv/bin/pip install -r /nlb/requirements.txt",
    )
    user_data.add_commands(
        "echo 'Generating nginx configuration'",
        "cd /etc/nginx/nlb.d",
        "/venv/bin/python /nlb/nginx.py",
        "systemctl start nginx",
    )
    user_data.add_commands(
        "echo 'Setting up SOCKS proxy'",
        "useradd pproxy",
        "cp /nlb/pproxy.service /etc/systemd/system/",
        "systemctl daemon-reload",
        "systemctl enable pproxy.service",
        "systemctl start pproxy.service",
    )

    asg = AutoScalingGroup(
        self,
        "ASG",
        vpc=vpc,
        instance_type=InstanceType("t3a.nano"),
        machine_image=MachineImage.latest_amazon_linux(
            generation=AmazonLinuxGeneration.AMAZON_LINUX_2),
        min_capacity=2,
        vpc_subnets=SubnetSelection(subnet_type=SubnetType.PUBLIC,
                                    one_per_az=True),
        user_data=user_data,
        health_check=HealthCheck.elb(grace=Duration.seconds(0)),
    )
    asg.add_security_group(ecs_security_group)

    # SSM for shell access; read access to the user-data asset.
    asg.role.add_managed_policy(
        ManagedPolicy.from_aws_managed_policy_name(
            "AmazonSSMManagedInstanceCore"))
    asset.grant_read(asg.role)

    # Read-only ECS/EC2 discovery permissions for the nginx config generator.
    policy = ManagedPolicy(self, "Policy")
    policy_statement = PolicyStatement(
        actions=[
            "ec2:DescribeInstances",
            "ecs:DescribeContainerInstances",
            "ecs:DescribeTasks",
            "ecs:ListContainerInstances",
            "ecs:ListServices",
            "ecs:ListTagsForResource",
            "ecs:ListTasks",
        ],
        resources=["*"],
    )
    policy.add_statements(policy_statement)
    asg.role.add_managed_policy(policy)

    # We could also make an additional security-group and add that to
    # the ASG, but it keeps adding up. This makes it a tiny bit
    # easier to get an overview what traffic is allowed from the
    # console on AWS.
    assert isinstance(asg.node.children[0], SecurityGroup)
    self.security_group = asg.node.children[0]

    # NOTE(review): `listener_https` is not defined in this chunk —
    # presumably a shared module imported at file level; confirm it is
    # in scope.
    listener_https.add_targets(
        subdomain_name=self.admin_subdomain_name,
        port=80,
        target=asg,
        priority=2,
    )

    # Create a Security Group so the lambdas can access the EC2.
    # This is needed to check if the EC2 instance is fully booted.
    lambda_security_group = SecurityGroup(
        self,
        "LambdaSG",
        vpc=vpc,
    )
    self.security_group.add_ingress_rule(
        peer=lambda_security_group,
        connection=Port.tcp(80),
        description="Lambda to target",
    )
    self.security_group.add_ingress_rule(
        peer=ecs_source_security_group,
        connection=Port.udp(8080),
        description="ECS to target",
    )

    self.create_ecs_lambda(
        cluster=cluster,
        auto_scaling_group=asg,
    )
    self.create_asg_lambda(
        lifecycle_transition=LifecycleTransition.INSTANCE_LAUNCHING,
        timeout=Duration.seconds(180),
        vpc=vpc,
        security_group=lambda_security_group,
        auto_scaling_group=asg,
    )
    self.create_asg_lambda(
        lifecycle_transition=LifecycleTransition.INSTANCE_TERMINATING,
        timeout=Duration.seconds(30),
        vpc=vpc,
        security_group=lambda_security_group,
        auto_scaling_group=asg,
    )

    # Initialize the NLB record on localhost, as we need to be able to
    # reference it for other entries to work correctly.
    ARecord(
        self,
        "ARecord",
        target=RecordTarget.from_ip_addresses("127.0.0.1"),
        zone=dns.get_hosted_zone(),
        record_name=self.subdomain_name,
        ttl=Duration.seconds(60),
    )
    AaaaRecord(
        self,
        "AAAARecord",
        target=RecordTarget.from_ip_addresses("::1"),
        zone=dns.get_hosted_zone(),
        record_name=self.subdomain_name,
        ttl=Duration.seconds(60),
    )
    # To make things a bit easier, also alias to staging.
    self.create_alias(self, "nlb.staging")

    # Create a record for the internal DNS
    ARecord(
        self,
        "APrivateRecord",
        target=RecordTarget.from_ip_addresses("127.0.0.1"),
        zone=self.private_zone,
        record_name=self.subdomain_name,
        ttl=Duration.seconds(60),
    )

    # Enforce the module-level singleton.
    if g_nlb is not None:
        raise Exception("Only a single NlbStack instance can exist")
    g_nlb = self
async def setup(self, id: str, domain: str) -> None:
    """Build the full site for *domain*: private S3 bucket behind
    CloudFront, a Lambda-backed REST API with custom domains, metrics,
    and a Route53 alias record.

    Bucket and canary creation are started as concurrent asyncio tasks and
    awaited where their results are needed.
    """
    print('SetupDomain: {}'.format(domain))
    # Kick off the slower creations concurrently.
    bucket_task = create_task(self.create_site_bucket(id=id))
    canary_task = create_task(self.create_canary_function(id=id))
    self.metrics = AppMetrics(self, '{}Metrics'.format(id),
                              app_name=domain, metrics=METRICS)

    # Access-log bucket for the CloudFront distribution.
    logging_bucket = Bucket(self, '{}DistroLogBucket'.format(id),
                            removal_policy=RemovalPolicy.DESTROY)
    log_config = LoggingConfiguration(bucket=logging_bucket,
                                      include_cookies=True)

    # Origin-access identity lets CloudFront read the private site bucket.
    site_identity = CfnCloudFrontOriginAccessIdentity(
        self, '{}SiteCFIdentity'.format(id),
        cloud_front_origin_access_identity_config=CfnCloudFrontOriginAccessIdentity.CloudFrontOriginAccessIdentityConfigProperty(
            comment='Website Origin Identity'))

    self.site_bucket = await bucket_task
    origin = S3OriginConfig(
        s3_bucket_source=self.site_bucket,
        origin_access_identity_id=site_identity.ref
    )
    self.distribution = CloudFrontWebDistribution(
        self, '{}SiteDistribution'.format(id),
        default_root_object='template.html',
        origin_configs=[
            SourceConfiguration(
                s3_origin_source=origin,
                behaviors=[
                    Behavior(
                        allowed_methods=CloudFrontAllowedMethods.GET_HEAD_OPTIONS,
                        is_default_behavior=True,
                        compress=True,
                        default_ttl=Duration.seconds(30),
                    )])],
        logging_config=log_config)
    cdn_name = self.distribution.domain_name

    # The site lambda receives the CDN name (e.g. for absolute links).
    self.site_function = await self.create_site_function(id=id, domain=domain,
                                                         cdn_name=cdn_name)

    acd_id = '{}APICustomDomain'.format(id)
    await self.acd_setup(id=acd_id, domain=domain,
                         sats=[domain, '*.{}'.format(domain)])

    stage_options = StageOptions(cache_cluster_enabled=True,
                                 caching_enabled=True,
                                 cache_cluster_size='0.5',
                                 data_trace_enabled=True,
                                 cache_ttl=Duration.seconds(30),
                                 metrics_enabled=True,
                                 tracing_enabled=True,
                                 logging_level=MethodLoggingLevel.INFO)
    # proxy=False: methods and resources are added explicitly below.
    proxy_setting = False
    self.api = LambdaRestApi(
        self, '{}API'.format(id),
        domain_name=self.dno,
        handler=self.site_function,
        deploy_options=stage_options,
        minimum_compression_size=2000,
        endpoint_types=[EndpointType.EDGE],
        cloud_watch_role=False,
        policy=MINIMAL_PUBLIC_API_POLICY_DOCUMENT,
        deploy=True,
        default_method_options={'authorizationType': AuthorizationType.NONE},
        proxy=proxy_setting
    )
    self.api.root.add_method(
        http_method='GET',
        integration=LambdaIntegration(self.site_function),
        authorization_type=AuthorizationType.NONE)
    # Fan out method creation per resource, then await the batch.
    for resource, methods in self.api_resources.items():
        added_resource = self.api.root.add_resource(resource)
        print('Added Resource: {}'.format(resource))
        tasks = set()
        for method in methods:
            tasks.add(create_task(self.create_api_method(
                resource=resource,
                added_resource=added_resource,
                method=method)))
        for task in tasks:
            await task

    # Extra edge-optimized custom domain for the www host, mapped to the API.
    self.www_dn = DomainName(self, '{}WWWCDN'.format(id),
                             mapping=self.api,
                             certificate=self.cert,
                             domain_name='www.{}'.format(domain),
                             endpoint_type=EndpointType.EDGE)
    target = ApiGateway(self.api)
    self.dns_record = ARecord(
        self, '{}DNSRecord'.format(id),
        target=RecordTarget(alias_target=target),
        zone=self.zone,
        record_name=domain)
    self.canary_function = await canary_task
def __init__(self, scope: Construct, id: str, *, deployment: Deployment, **kwargs) -> None:
    """openttd.com stack: CloudFront + S3 hosting with a Lambda@Edge
    redirect and A/AAAA alias records for both "www" and the zone apex.
    """
    super().__init__(scope, id, **kwargs)

    Tags.of(self).add("Application", self.application_name)
    Tags.of(self).add("Deployment", deployment.value)

    hosted_zone = HostedZone.from_lookup(
        self,
        "Zone",
        domain_name="openttd.com",
    )
    fqdn = "www.openttd.com"

    # Certificate is created in us-east-1, as used by CloudFront.
    certificate = DnsValidatedCertificate(
        self,
        "OpenttdCom-Certificate",
        hosted_zone=hosted_zone,
        domain_name=fqdn,
        subject_alternative_names=["*.openttd.com", "openttd.com"],
        region="us-east-1",
        validation_method=ValidationMethod.DNS,
    )

    # Lambda@Edge function attached to origin-request events below.
    func = lambda_edge.create_function(
        self,
        "OpenttdComRedirect",
        runtime=Runtime.NODEJS_10_X,
        handler="index.handler",
        code=Code.from_asset("./lambdas/openttd-com-redirect"),
    )

    # no_dns=True: DNS records are created explicitly in the loop below.
    s3_cloud_front = S3CloudFront(
        self,
        "S3CloudFront",
        subdomain_name=fqdn,
        cert=CertificateResult(certificate, certificate.certificate_arn, fqdn),
        additional_fqdns=["*.openttd.com", "openttd.com"],
        lambda_function_associations=[
            LambdaFunctionAssociation(
                event_type=LambdaEdgeEventType.ORIGIN_REQUEST,
                lambda_function=func,
            ),
        ],
        no_dns=True,
    )
    S3CloudFrontPolicy(
        self,
        "S3cloudFrontPolicy",
        s3_cloud_front=s3_cloud_front,
    )

    # A + AAAA aliases for "www" and for the apex (record_name=None).
    # Note: when record_name is None the f-string renders the construct
    # ids as "None.openttd.com-ARecord" / "None.openttd.com-AaaaRecord".
    for record_name in ("www", None):
        route53.ARecord(
            self,
            f"{record_name}.openttd.com-ARecord",
            target=RecordTarget.from_alias(CloudFrontTarget(s3_cloud_front.distribution)),
            zone=hosted_zone,
            record_name=record_name,
        )
        route53.AaaaRecord(
            self,
            f"{record_name}.openttd.com-AaaaRecord",
            target=RecordTarget.from_alias(CloudFrontTarget(s3_cloud_front.distribution)),
            zone=hosted_zone,
            record_name=record_name,
        )