def __init__(
    self,
    scope: core.Construct,
    id: str,
    vpc_stack,
    logstash_ec2: bool = True,
    logstash_fargate: bool = True,
    **kwargs,
) -> None:
    """Create the Logstash stack.

    At synth time this looks up (via boto3) the S3 bucket created by the
    "elkk-athena" stack, the VPC endpoint of the "elkk-" Elasticsearch
    domain and the Kafka brokers, renders ``logstash.conf`` with those
    values, then deploys Logstash on an EC2 instance and/or as a Fargate
    service.

    Args:
        scope: parent construct.
        id: construct id.
        vpc_stack: stack exposing ``get_vpc``.
        logstash_ec2: create the EC2-based Logstash instance.
        logstash_fargate: create the Fargate-based Logstash service.
    """
    super().__init__(scope, id, **kwargs)

    # Find the S3 bucket created by the "elkk-athena" stack by inspecting
    # bucket tags; untagged/deleted buckets raise and are skipped.
    s3client = boto3.client("s3")
    s3_bucket_list = s3client.list_buckets()
    s3_bucket_name = ""
    for bkt in s3_bucket_list["Buckets"]:
        try:
            bkt_tags = s3client.get_bucket_tagging(
                Bucket=bkt["Name"])["TagSet"]
            for keypairs in bkt_tags:
                if (keypairs["Key"] == "aws:cloudformation:stack-name"
                        and keypairs["Value"] == "elkk-athena"):
                    s3_bucket_name = bkt["Name"]
        except ClientError as err:
            if err.response["Error"]["Code"] in [
                    "NoSuchTagSet", "NoSuchBucket"
            ]:
                # expected for buckets without tags or racing a delete
                pass
            else:
                print(f"Unexpected error: {err}")

    # Resolve the VPC endpoint of the "elkk-" Elasticsearch domain; fall
    # back to an empty endpoint when the domain does not exist yet.
    esclient = boto3.client("es")
    es_domains = esclient.list_domain_names()
    try:
        es_domain = [
            dom["DomainName"] for dom in es_domains["DomainNames"]
            if "elkk-" in dom["DomainName"]
        ][0]
        es_endpoint = esclient.describe_elasticsearch_domain(
            DomainName=es_domain)
        es_endpoint = es_endpoint["DomainStatus"]["Endpoints"]["vpc"]
    except IndexError:
        es_endpoint = ""

    # assets for logstash stack
    logstash_yml = assets.Asset(self,
                                "logstash_yml",
                                path=os.path.join(dirname, "logstash.yml"))
    logstash_repo = assets.Asset(self,
                                 "logstash_repo",
                                 path=os.path.join(dirname, "logstash.repo"))
    # update conf file to .asset
    # kafka brokerstring does not need reformatting
    logstash_conf_asset = file_updated(
        os.path.join(dirname, "logstash.conf"),
        {
            "$s3_bucket": s3_bucket_name,
            "$es_endpoint": es_endpoint,
            "$kafka_brokers": kafka_get_brokers(),
            "$elkk_region": os.environ["CDK_DEFAULT_REGION"],
        },
    )
    logstash_conf = assets.Asset(
        self,
        "logstash.conf",
        path=logstash_conf_asset,
    )

    # logstash security group
    logstash_security_group = ec2.SecurityGroup(
        self,
        "logstash_security_group",
        vpc=vpc_stack.get_vpc,
        description="logstash security group",
        allow_all_outbound=True,
    )
    core.Tags.of(logstash_security_group).add("project",
                                              constants["PROJECT_TAG"])
    core.Tags.of(logstash_security_group).add("Name", "logstash_sg")

    # Open port 22 for SSH
    logstash_security_group.add_ingress_rule(
        ec2.Peer.ipv4(f"{external_ip}/32"),
        ec2.Port.tcp(22),
        "from own public ip",
    )

    # get security group for kafka
    ec2client = boto3.client("ec2")
    security_groups = ec2client.describe_security_groups(Filters=[{
        "Name": "tag-value",
        "Values": [constants["PROJECT_TAG"]]
    }], )

    # if kafka sg does not exist ... don't add it
    try:
        kafka_sg_id = [
            sg["GroupId"] for sg in security_groups["SecurityGroups"]
            if "kafka security group" in sg["Description"]
        ][0]
        kafka_security_group = ec2.SecurityGroup.from_security_group_id(
            self, "kafka_security_group", security_group_id=kafka_sg_id)
        # let in logstash
        kafka_security_group.connections.allow_from(
            logstash_security_group,
            ec2.Port.all_traffic(),
            "from logstash",
        )
    except IndexError:
        pass

    # get security group for elastic; skip when it does not exist yet
    try:
        elastic_sg_id = [
            sg["GroupId"] for sg in security_groups["SecurityGroups"]
            if "elastic security group" in sg["Description"]
        ][0]
        elastic_security_group = ec2.SecurityGroup.from_security_group_id(
            self, "elastic_security_group", security_group_id=elastic_sg_id)
        # let in logstash
        elastic_security_group.connections.allow_from(
            logstash_security_group,
            ec2.Port.all_traffic(),
            "from logstash",
        )
    except IndexError:
        pass

    # IAM statements shared by the EC2 instance role and the Fargate task
    # role below.
    # elastic policy
    access_elastic_policy = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=[
            "es:ListDomainNames",
            "es:DescribeElasticsearchDomain",
            "es:ESHttpPut",
        ],
        resources=["*"],
    )
    # kafka policy
    access_kafka_policy = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=["kafka:ListClusters", "kafka:GetBootstrapBrokers"],
        resources=["*"],
    )
    # s3 policy
    access_s3_policy = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=["s3:ListBucket", "s3:PutObject"],
        resources=["*"],
    )

    # create the Logstash instance
    if logstash_ec2:
        # userdata for Logstash
        logstash_userdata = user_data_init(
            log_group_name="elkk/logstash/instance")
        # create the instance
        logstash_instance = ec2.Instance(
            self,
            "logstash_client",
            instance_type=ec2.InstanceType(constants["LOGSTASH_INSTANCE"]),
            machine_image=ec2.AmazonLinuxImage(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
            vpc=vpc_stack.get_vpc,
            # use the ec2.-qualified name for consistency with the rest of
            # this module
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC),
            key_name=constants["KEY_PAIR"],
            security_group=logstash_security_group,
            user_data=logstash_userdata,
        )
        # core.Tags.of replaces the deprecated core.Tag.add
        core.Tags.of(logstash_instance).add("project",
                                            constants["PROJECT_TAG"])

        # add access to the file assets
        logstash_yml.grant_read(logstash_instance)
        logstash_repo.grant_read(logstash_instance)
        logstash_conf.grant_read(logstash_instance)

        # add permissions to instance
        logstash_instance.add_to_role_policy(
            statement=access_elastic_policy)
        logstash_instance.add_to_role_policy(statement=access_kafka_policy)
        logstash_instance.add_to_role_policy(statement=access_s3_policy)
        # add log permissions
        instance_add_log_permissions(logstash_instance)

        # add commands to the userdata
        logstash_userdata.add_commands(
            # get setup assets files
            f"aws s3 cp s3://{logstash_yml.s3_bucket_name}/{logstash_yml.s3_object_key} /home/ec2-user/logstash.yml",
            f"aws s3 cp s3://{logstash_repo.s3_bucket_name}/{logstash_repo.s3_object_key} /home/ec2-user/logstash.repo",
            f"aws s3 cp s3://{logstash_conf.s3_bucket_name}/{logstash_conf.s3_object_key} /home/ec2-user/logstash.conf",
            # install java
            "amazon-linux-extras install java-openjdk11 -y",
            # install git
            "yum install git -y",
            # install pip
            "yum install python-pip -y",
            # get elastic output to es
            "git clone https://github.com/awslabs/logstash-output-amazon_es.git /home/ec2-user/logstash-output-amazon_es",
            # logstash
            "rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch",
            # move logstash repo file
            "mv -f /home/ec2-user/logstash.repo /etc/yum.repos.d/logstash.repo",
            # get to the yum
            "yum install logstash -y",
            # add user to logstash group
            "usermod -a -G logstash ec2-user",
            # move logstash.yml to final location
            "mv -f /home/ec2-user/logstash.yml /etc/logstash/logstash.yml",
            # move logstash.conf to final location
            "mv -f /home/ec2-user/logstash.conf /etc/logstash/conf.d/logstash.conf",
            # move plugin
            "mkdir /usr/share/logstash/plugins",
            "mv -f /home/ec2-user/logstash-output-amazon_es /usr/share/logstash/plugins/logstash-output-amazon_es",
            # update gemfile
            """sed -i '5igem "logstash-output-amazon_es", :path => "/usr/share/logstash/plugins/logstash-output-amazon_es"' /usr/share/logstash/Gemfile""",
            # update ownership
            "chown -R logstash:logstash /etc/logstash",
            # start logstash
            "systemctl start logstash.service",
        )
        # add the signal
        logstash_userdata.add_signal_on_exit_command(
            resource=logstash_instance)
        # add creation policy for instance
        logstash_instance.instance.cfn_options.creation_policy = core.CfnCreationPolicy(
            resource_signal=core.CfnResourceSignal(count=1, timeout="PT10M"))

    # fargate for logstash
    if logstash_fargate:
        # cloudwatch log group for containers
        logstash_logs_containers = logs.LogGroup(
            self,
            "logstash_logs_containers",
            log_group_name="elkk/logstash/container",
            removal_policy=core.RemovalPolicy.DESTROY,
            retention=logs.RetentionDays.ONE_WEEK,
        )
        # docker image for logstash
        logstash_image_asset = ecr_assets.DockerImageAsset(
            self, "logstash_image_asset", directory=dirname  # , file="Dockerfile"
        )
        # create the fargate cluster
        logstash_cluster = ecs.Cluster(self,
                                       "logstash_cluster",
                                       vpc=vpc_stack.get_vpc)
        core.Tags.of(logstash_cluster).add("project",
                                           constants["PROJECT_TAG"])
        # the task
        logstash_task = ecs.FargateTaskDefinition(
            self,
            "logstash_task",
            cpu=512,
            memory_limit_mib=1024,
        )
        # add container to the task
        logstash_task.add_container(
            logstash_image_asset.source_hash,
            image=ecs.ContainerImage.from_docker_image_asset(
                logstash_image_asset),
            logging=ecs.LogDrivers.aws_logs(
                stream_prefix="elkk", log_group=logstash_logs_containers),
        )
        # add permissions to the task
        logstash_task.add_to_task_role_policy(access_s3_policy)
        logstash_task.add_to_task_role_policy(access_elastic_policy)
        # the service, autoscaled on CPU between 3 and 10 tasks
        logstash_service = (ecs.FargateService(
            self,
            "logstash_service",
            cluster=logstash_cluster,
            task_definition=logstash_task,
            security_group=logstash_security_group,
            deployment_controller=ecs.DeploymentController(
                type=ecs.DeploymentControllerType.ECS),
        ).auto_scale_task_count(
            min_capacity=3, max_capacity=10).scale_on_cpu_utilization(
                "logstash_scaling",
                target_utilization_percent=75,
                scale_in_cooldown=core.Duration.seconds(60),
                scale_out_cooldown=core.Duration.seconds(60),
            ))
def __init__(
    self, scope: core.Construct, id: str, vpc_stack, kafka_stack, **kwargs
) -> None:
    """Create the Filebeat stack.

    Launches one EC2 instance that runs Filebeat plus a Python log
    generator; the generated logs are shipped to the Kafka brokers of
    ``kafka_stack``.

    Args:
        scope: parent construct.
        id: construct id.
        vpc_stack: stack exposing ``get_vpc``.
        kafka_stack: stack exposing ``get_kafka_client_security_group``.
    """
    super().__init__(scope, id, **kwargs)

    # log generator asset
    log_generator_py = assets.Asset(
        self, "log_generator", path=os.path.join(dirname, "log_generator.py")
    )
    # log generator requirements.txt asset
    log_generator_requirements_txt = assets.Asset(
        self,
        "log_generator_requirements_txt",
        path=os.path.join(dirname, "log_generator_requirements.txt"),
    )

    # get kafka brokers, quoted for the filebeat.yml list: "b1", "b2", ...
    kafka_brokers = f'''"{kafka_get_brokers().replace(",", '", "')}"'''
    # update filebeat.yml to .asset
    filebeat_yml_asset = file_updated(
        os.path.join(dirname, "filebeat.yml"),
        {"$kafka_brokers": kafka_brokers},
    )
    filebeat_yml = assets.Asset(self, "filebeat_yml", path=filebeat_yml_asset)
    elastic_repo = assets.Asset(
        self, "elastic_repo", path=os.path.join(dirname, "elastic.repo")
    )

    # userdata for Filebeat
    fb_userdata = user_data_init(log_group_name="elkk/filebeat/instance")
    # instance for Filebeat; the userdata object is attached here once and
    # extended below (commands added later still render into the template).
    fb_instance = ec2.Instance(
        self,
        "filebeat_client",
        instance_type=ec2.InstanceType(constants["FILEBEAT_INSTANCE"]),
        machine_image=ec2.AmazonLinuxImage(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2
        ),
        vpc=vpc_stack.get_vpc,
        vpc_subnets={"subnet_type": ec2.SubnetType.PUBLIC},
        key_name=constants["KEY_PAIR"],
        security_group=kafka_stack.get_kafka_client_security_group,
        user_data=fb_userdata,
    )
    # core.Tags.of replaces the deprecated core.Tag.add
    core.Tags.of(fb_instance).add("project", constants["PROJECT_TAG"])

    # create policies for EC2 to connect to kafka
    access_kafka_policy = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=["kafka:ListClusters", "kafka:GetBootstrapBrokers"],
        resources=["*"],
    )
    # add the role permissions
    fb_instance.add_to_role_policy(statement=access_kafka_policy)
    # add log permissions
    instance_add_log_permissions(fb_instance)

    # add access to the file assets
    filebeat_yml.grant_read(fb_instance)
    elastic_repo.grant_read(fb_instance)
    log_generator_py.grant_read(fb_instance)
    log_generator_requirements_txt.grant_read(fb_instance)

    # add commands to the userdata
    fb_userdata.add_commands(
        # get setup assets files
        f"aws s3 cp s3://{filebeat_yml.s3_bucket_name}/{filebeat_yml.s3_object_key} /home/ec2-user/filebeat.yml",
        f"aws s3 cp s3://{elastic_repo.s3_bucket_name}/{elastic_repo.s3_object_key} /home/ec2-user/elastic.repo",
        f"aws s3 cp s3://{log_generator_py.s3_bucket_name}/{log_generator_py.s3_object_key} /home/ec2-user/log_generator.py",
        f"aws s3 cp s3://{log_generator_requirements_txt.s3_bucket_name}/{log_generator_requirements_txt.s3_object_key} /home/ec2-user/requirements.txt",
        # get python3
        "yum install python3 -y",
        # get pip
        "yum install python-pip -y",
        # make log generator executable
        "chmod +x /home/ec2-user/log_generator.py",
        # get log generator requirements
        "python3 -m pip install -r /home/ec2-user/requirements.txt",
        # Filebeat
        "rpm --import https://packages.elastic.co/GPG-KEY-elasticsearch",
        # move Filebeat repo file
        "mv -f /home/ec2-user/elastic.repo /etc/yum.repos.d/elastic.repo",
        # install Filebeat
        "yum install filebeat -y",
        # move filebeat.yml to final location
        "mv -f /home/ec2-user/filebeat.yml /etc/filebeat/filebeat.yml",
        # update log generator ownership
        "chown -R ec2-user:ec2-user /home/ec2-user",
        # start Filebeat
        "systemctl start filebeat",
    )
    # add the signal
    fb_userdata.add_signal_on_exit_command(resource=fb_instance)
    # NOTE(review): the previous version also called
    # fb_instance.add_user_data(fb_userdata.render()), appending the
    # already-attached script to itself so every command ran twice at boot;
    # the userdata is attached exactly once via user_data= above.
    # add creation policy for instance
    fb_instance.instance.cfn_options.creation_policy = core.CfnCreationPolicy(
        resource_signal=core.CfnResourceSignal(count=1, timeout="PT10M")
    )
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Create the Jitsi stack.

    Builds a single-instance, auto-healing Jitsi Meet deployment: a VPC, an
    Elastic IP, an Auto Scaling Group of size 1 launched from a
    region-mapped AMI, security-group ingress for HTTP/HTTPS and the Jitsi
    audio/video ports, a Route 53 A record, and an SNS notification topic.
    """
    super().__init__(scope, id, **kwargs)

    current_directory = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))
    # use a context manager so the file handle is closed deterministically
    # (the previous open() call was never closed)
    with open(
            os.path.join(current_directory, "..", "..",
                         "allowed_values.yaml")) as allowed_values_file:
        allowed_values = yaml.load(allowed_values_file,
                                   Loader=yaml.SafeLoader)

    # map every region with a generated AMI id onto the OEJITSI key
    ami_mapping = {"AMI": {"OEJITSI": AMI_NAME}}
    for region, ami_id in generated_ami_ids.items():
        ami_mapping[region] = {"OEJITSI": ami_id}
    aws_ami_region_map = core.CfnMapping(self,
                                         "AWSAMIRegionMap",
                                         mapping=ami_mapping)

    # utility function to parse the unique id from the stack id for
    # shorter resource names using cloudformation functions
    def append_stack_uuid(name):
        return core.Fn.join("-", [
            name,
            core.Fn.select(
                0,
                core.Fn.split(
                    "-", core.Fn.select(2, core.Fn.split(
                        "/", core.Aws.STACK_ID))))
        ])

    #
    # PARAMETERS
    #

    cidr_block_param = core.CfnParameter(
        self,
        "IngressCidrBlock",
        # raw string: \d in a plain string is an invalid escape sequence
        allowed_pattern=r"((\d{1,3})\.){3}\d{1,3}/\d{1,2}",
        default="0.0.0.0/0",
        description=
        "Required: A CIDR block to restrict access to the Jitsi application. Leave as 0.0.0.0/0 to allow public access from internet."
    )
    ec2_instance_type_param = core.CfnParameter(
        self,
        "InstanceType",
        allowed_values=allowed_values["allowed_instance_types"],
        default="t3.xlarge",
        description=
        "Required: The EC2 instance type for the application Auto Scaling Group."
    )
    jitsi_hostname_param = core.CfnParameter(
        self,
        "JitsiHostname",
        description=
        "Required: The hostname to access Jitsi. E.G. 'jitsi.internal.mycompany.com'"
    )
    jitsi_interface_app_name_param = core.CfnParameter(
        self,
        "JitsiInterfaceAppName",
        default="Jitsi Meet",
        description=
        "Optional: Customize the app name on the Jitsi interface.")
    jitsi_interface_default_remote_display_name_param = core.CfnParameter(
        self,
        "JitsiInterfaceDefaultRemoteDisplayName",
        default="Fellow Jitster",
        description=
        "Optional: Customize the default display name for Jitsi users.")
    jitsi_interface_native_app_name_param = core.CfnParameter(
        self,
        "JitsiInterfaceNativeAppName",
        default="Jitsi Meet",
        description=
        "Optional: Customize the native app name on the Jitsi interface.")
    jitsi_interface_show_brand_watermark_param = core.CfnParameter(
        self,
        "JitsiInterfaceShowBrandWatermark",
        allowed_values=["true", "false"],
        default="true",
        description=
        "Optional: Display the watermark logo image in the upper left corner."
    )
    jitsi_interface_show_watermark_for_guests_param = core.CfnParameter(
        self,
        "JitsiInterfaceShowWatermarkForGuests",
        allowed_values=["true", "false"],
        default="true",
        description=
        "Optional: Display the watermark logo image in the upper left corner for guest users. This can be set to override the general setting behavior for guest users."
    )
    jitsi_interface_brand_watermark_param = core.CfnParameter(
        self,
        "JitsiInterfaceBrandWatermark",
        default="",
        description=
        "Optional: Provide a URL to a PNG image to be used as the brand watermark logo image in the upper right corner. File should be publically available for download."
    )
    jitsi_interface_brand_watermark_link_param = core.CfnParameter(
        self,
        "JitsiInterfaceBrandWatermarkLink",
        default="http://jitsi.org",
        description=
        "Optional: Provide a link destination for the brand watermark logo image in the upper right corner."
    )
    jitsi_interface_watermark_param = core.CfnParameter(
        self,
        "JitsiInterfaceWatermark",
        default="",
        description=
        "Optional: Provide a URL to a PNG image to be used as the watermark logo image in the upper left corner. File should be publically available for download."
    )
    jitsi_interface_watermark_link_param = core.CfnParameter(
        self,
        "JitsiInterfaceWatermarkLink",
        default="http://jitsi.org",
        description=
        "Optional: Provide a link destination for the Jitsi watermark logo image in the upper left corner."
    )
    route_53_hosted_zone_name_param = core.CfnParameter(
        self,
        "Route53HostedZoneName",
        description=
        "Required: Route 53 Hosted Zone name in which a DNS record will be created by this template. Must already exist and be the domain part of the Jitsi Hostname parameter, without trailing dot. E.G. 'internal.mycompany.com'"
    )
    notification_email_param = core.CfnParameter(
        self,
        "NotificationEmail",
        default="",
        description=
        "Optional: Specify an email address to get emails about deploys, Let's Encrypt, and other system events."
    )

    #
    # CONDITIONS
    #

    notification_email_exists_condition = core.CfnCondition(
        self,
        "NotificationEmailExistsCondition",
        expression=core.Fn.condition_not(
            core.Fn.condition_equals(notification_email_param.value, "")))

    #
    # RESOURCES
    #

    # vpc
    vpc = Vpc(self, "Vpc")

    # sns; the subscription only exists when an email was supplied
    sns_notification_topic = aws_sns.CfnTopic(
        self,
        "NotificationTopic",
        topic_name="{}-notifications".format(core.Aws.STACK_NAME))
    sns_notification_subscription = aws_sns.CfnSubscription(
        self,
        "NotificationSubscription",
        protocol="email",
        topic_arn=sns_notification_topic.ref,
        endpoint=notification_email_param.value_as_string)
    sns_notification_subscription.cfn_options.condition = notification_email_exists_condition
    iam_notification_publish_policy = aws_iam.PolicyDocument(statements=[
        aws_iam.PolicyStatement(effect=aws_iam.Effect.ALLOW,
                                actions=["sns:Publish"],
                                resources=[sns_notification_topic.ref])
    ])

    # cloudwatch log groups, retained on stack deletion
    app_log_group = aws_logs.CfnLogGroup(
        self, "JitsiAppLogGroup", retention_in_days=TWO_YEARS_IN_DAYS)
    app_log_group.cfn_options.update_replace_policy = core.CfnDeletionPolicy.RETAIN
    app_log_group.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN
    system_log_group = aws_logs.CfnLogGroup(
        self, "JitsiSystemLogGroup", retention_in_days=TWO_YEARS_IN_DAYS)
    system_log_group.cfn_options.update_replace_policy = core.CfnDeletionPolicy.RETAIN
    system_log_group.cfn_options.deletion_policy = core.CfnDeletionPolicy.RETAIN

    # iam: instance role with CloudWatch logs/metrics, EIP association and
    # autoscaling describe permissions, plus SSM core access
    iam_jitsi_instance_role = aws_iam.CfnRole(
        self,
        "JitsiInstanceRole",
        assume_role_policy_document=aws_iam.PolicyDocument(statements=[
            aws_iam.PolicyStatement(
                effect=aws_iam.Effect.ALLOW,
                actions=["sts:AssumeRole"],
                principals=[aws_iam.ServicePrincipal("ec2.amazonaws.com")])
        ]),
        policies=[
            aws_iam.CfnRole.PolicyProperty(
                policy_document=aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(
                        effect=aws_iam.Effect.ALLOW,
                        actions=[
                            "logs:CreateLogStream",
                            "logs:DescribeLogStreams", "logs:PutLogEvents"
                        ],
                        resources=[
                            app_log_group.attr_arn,
                            system_log_group.attr_arn
                        ])
                ]),
                policy_name="AllowStreamLogsToCloudWatch"),
            aws_iam.CfnRole.PolicyProperty(
                policy_document=aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(
                        effect=aws_iam.Effect.ALLOW,
                        actions=[
                            "ec2:AssociateAddress", "ec2:DescribeVolumes",
                            "ec2:DescribeTags",
                            "cloudwatch:GetMetricStatistics",
                            "cloudwatch:ListMetrics",
                            "cloudwatch:PutMetricData"
                        ],
                        resources=["*"])
                ]),
                policy_name="AllowStreamMetricsToCloudWatch"),
            aws_iam.CfnRole.PolicyProperty(
                policy_document=aws_iam.PolicyDocument(statements=[
                    aws_iam.PolicyStatement(
                        effect=aws_iam.Effect.ALLOW,
                        actions=["autoscaling:Describe*"],
                        resources=["*"])
                ]),
                policy_name="AllowDescribeAutoScaling"),
        ],
        managed_policy_arns=[
            "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
        ])

    # ec2
    jitsi_sg = aws_ec2.CfnSecurityGroup(
        self,
        "JitsiSg",
        group_description="Jitsi security group",
        vpc_id=vpc.id())
    eip = aws_ec2.CfnEIP(self, "Eip", domain="vpc")
    core.Tags.of(eip).add("Name", "{}/Eip".format(core.Aws.STACK_NAME))
    ec2_instance_profile = aws_iam.CfnInstanceProfile(
        self, "JitsiInstanceProfile", roles=[iam_jitsi_instance_role.ref])
    with open("jitsi/jitsi_launch_config_user_data.sh") as f:
        jitsi_launch_config_user_data = f.read()
    ec2_launch_config = aws_autoscaling.CfnLaunchConfiguration(
        self,
        "JitsiLaunchConfig",
        image_id=core.Fn.find_in_map("AWSAMIRegionMap", core.Aws.REGION,
                                     "OEJITSI"),
        instance_type=ec2_instance_type_param.value_as_string,
        iam_instance_profile=ec2_instance_profile.ref,
        security_groups=[jitsi_sg.ref],
        user_data=(core.Fn.base64(
            core.Fn.sub(
                jitsi_launch_config_user_data, {
                    "JitsiHostname": jitsi_hostname_param.value_as_string,
                    "JitsiPublicIP": eip.ref,
                    "LetsEncryptCertificateEmail":
                    notification_email_param.value_as_string
                }))))

    # autoscaling: a fixed-size group of one, replaced in a rolling update
    asg = aws_autoscaling.CfnAutoScalingGroup(
        self,
        "JitsiAsg",
        launch_configuration_name=ec2_launch_config.ref,
        desired_capacity="1",
        max_size="1",
        min_size="1",
        vpc_zone_identifier=vpc.public_subnet_ids())
    asg.cfn_options.creation_policy = core.CfnCreationPolicy(
        resource_signal=core.CfnResourceSignal(count=1, timeout="PT15M"))
    asg.cfn_options.update_policy = core.CfnUpdatePolicy(
        auto_scaling_rolling_update=core.CfnAutoScalingRollingUpdate(
            max_batch_size=1,
            min_instances_in_service=0,
            pause_time="PT15M",
            wait_on_resource_signals=True))
    core.Tags.of(asg).add("Name",
                          "{}/JitsiAsg".format(core.Aws.STACK_NAME))

    # security group ingress: HTTP, HTTPS, TCP fallback (4443) and the
    # general UDP audio/video port (10000)
    jitsi_http_ingress = aws_ec2.CfnSecurityGroupIngress(
        self,
        "JitsiHttpSgIngress",
        cidr_ip=cidr_block_param.value_as_string,
        from_port=80,
        group_id=jitsi_sg.ref,
        ip_protocol="tcp",
        to_port=80)
    jitsi_https_ingress = aws_ec2.CfnSecurityGroupIngress(
        self,
        "JitsiHttpsSgIngress",
        cidr_ip=cidr_block_param.value_as_string,
        from_port=443,
        group_id=jitsi_sg.ref,
        ip_protocol="tcp",
        to_port=443)
    jitsi_fallback_network_audio_video_ingress = aws_ec2.CfnSecurityGroupIngress(
        self,
        "JitsiFallbackNetworkAudioVideoSgIngress",
        cidr_ip=cidr_block_param.value_as_string,
        from_port=4443,
        group_id=jitsi_sg.ref,
        ip_protocol="tcp",
        to_port=4443)
    jitsi_general_network_audio_video_ingress = aws_ec2.CfnSecurityGroupIngress(
        self,
        "JitsiGeneralNetworkAudioVideoSgIngress",
        cidr_ip=cidr_block_param.value_as_string,
        from_port=10000,
        group_id=jitsi_sg.ref,
        ip_protocol="udp",
        to_port=10000)

    # route 53
    record_set = aws_route53.CfnRecordSet(
        self,
        "RecordSet",
        hosted_zone_name=
        f"{route_53_hosted_zone_name_param.value_as_string}.",
        name=jitsi_hostname_param.value_as_string,
        resource_records=[eip.ref],
        type="A")
    # https://github.com/aws/aws-cdk/issues/8431
    record_set.add_property_override("TTL", 60)

    # AWS::CloudFormation::Interface
    self.template_options.metadata = {
        "OE::Patterns::TemplateVersion": template_version,
        "AWS::CloudFormation::Interface": {
            "ParameterGroups": [{
                "Label": {
                    "default": "Infrastructure Config"
                },
                "Parameters": [
                    jitsi_hostname_param.logical_id,
                    route_53_hosted_zone_name_param.logical_id,
                    cidr_block_param.logical_id,
                    ec2_instance_type_param.logical_id,
                    notification_email_param.logical_id
                ]
            }, {
                "Label": {
                    "default": "Jitsi Config"
                },
                "Parameters": [
                    jitsi_interface_app_name_param.logical_id,
                    jitsi_interface_default_remote_display_name_param.
                    logical_id,
                    jitsi_interface_native_app_name_param.logical_id,
                    jitsi_interface_show_brand_watermark_param.logical_id,
                    jitsi_interface_show_watermark_for_guests_param.
                    logical_id,
                    jitsi_interface_brand_watermark_param.logical_id,
                    jitsi_interface_brand_watermark_link_param.logical_id,
                    jitsi_interface_watermark_param.logical_id,
                    jitsi_interface_watermark_link_param.logical_id,
                ]
            }, *vpc.metadata_parameter_group()],
            "ParameterLabels": {
                cidr_block_param.logical_id: {
                    "default": "Ingress CIDR Block"
                },
                ec2_instance_type_param.logical_id: {
                    "default": "EC2 instance type"
                },
                jitsi_hostname_param.logical_id: {
                    "default": "Jitsi Hostname"
                },
                jitsi_interface_app_name_param.logical_id: {
                    "default": "Jitsi Interface App Name"
                },
                jitsi_interface_default_remote_display_name_param.logical_id: {
                    "default": "Jitsi Interface Default Remote Display Name"
                },
                jitsi_interface_native_app_name_param.logical_id: {
                    "default": "Jitsi Interface Native App Name"
                },
                jitsi_interface_show_brand_watermark_param.logical_id: {
                    "default": "Jitsi Interface Show Watermark"
                },
                jitsi_interface_show_watermark_for_guests_param.logical_id: {
                    "default": "Jitsi Interface Show Watermark For Guests"
                },
                # "Brand" labels: the previous labels duplicated the plain
                # watermark labels, making the two parameter pairs
                # indistinguishable in the CloudFormation console
                jitsi_interface_brand_watermark_param.logical_id: {
                    "default": "Jitsi Interface Brand Watermark"
                },
                jitsi_interface_brand_watermark_link_param.logical_id: {
                    "default": "Jitsi Interface Brand Watermark Link"
                },
                jitsi_interface_watermark_param.logical_id: {
                    "default": "Jitsi Interface Watermark"
                },
                jitsi_interface_watermark_link_param.logical_id: {
                    "default": "Jitsi Interface Watermark Link"
                },
                notification_email_param.logical_id: {
                    "default": "Notification Email"
                },
                route_53_hosted_zone_name_param.logical_id: {
                    "default": "AWS Route 53 Hosted Zone Name"
                },
                **vpc.metadata_parameter_labels()
            }
        }
    }

    #
    # OUTPUTS
    #

    eip_output = core.CfnOutput(
        self,
        "EipOutput",
        description=
        "The Elastic IP address dynamically mapped to the autoscaling group instance.",
        value=eip.ref)
    endpoint_output = core.CfnOutput(
        self,
        "JitsiUrl",
        description="The URL for the Jitsi instance.",
        value=core.Fn.join(
            "", ["https://", jitsi_hostname_param.value_as_string]))
def __init__(
    self,
    scope: core.Construct,
    id: str,
    vpc_stack,
    # kafka_stack,
    client: bool = True,
    **kwargs,
) -> None:
    """Create the Elasticsearch stack.

    Provisions an Amazon ES domain inside the VPC together with its
    security groups, and optionally an EC2 "client" instance that can
    reach the domain (e.g. for an SSH tunnel on port 9200).

    Args:
        scope: parent construct.
        id: construct id.
        vpc_stack: stack exposing ``get_vpc`` and
            ``get_vpc_private_subnet_ids``.
        client: also create the EC2 client instance.
    """
    super().__init__(scope, id, **kwargs)

    # ensure that the service linked role exists
    ensure_service_linked_role("es.amazonaws.com")

    # cloudwatch log group
    elastic_log_group = logs.LogGroup(
        self,
        "elastic_log_group",
        log_group_name="elkk/elastic/aes",
        removal_policy=core.RemovalPolicy.DESTROY,
        retention=logs.RetentionDays.ONE_WEEK,
    )

    # security group for elastic client
    elastic_client_security_group = ec2.SecurityGroup(
        self,
        "elastic_client_security_group",
        vpc=vpc_stack.get_vpc,
        description="elastic client security group",
        allow_all_outbound=True,
    )
    # core.Tags.of replaces the deprecated core.Tag.add
    core.Tags.of(elastic_client_security_group).add(
        "project", constants["PROJECT_TAG"])
    core.Tags.of(elastic_client_security_group).add(
        "Name", "elastic_client_sg")
    # Open port 22 for SSH
    elastic_client_security_group.add_ingress_rule(
        ec2.Peer.ipv4(f"{external_ip}/32"),
        ec2.Port.tcp(22),
        "from own public ip",
    )
    # Open port for tunnel
    elastic_client_security_group.add_ingress_rule(
        ec2.Peer.ipv4(f"{external_ip}/32"),
        ec2.Port.tcp(9200),
        "for ssh tunnel",
    )

    # security group for elastic
    self.elastic_security_group = ec2.SecurityGroup(
        self,
        "elastic_security_group",
        vpc=vpc_stack.get_vpc,
        description="elastic security group",
        allow_all_outbound=True,
    )
    core.Tags.of(self.elastic_security_group).add("project",
                                                  constants["PROJECT_TAG"])
    core.Tags.of(self.elastic_security_group).add("Name", "elastic_sg")
    # ingress for elastic from self
    self.elastic_security_group.connections.allow_from(
        self.elastic_security_group,
        ec2.Port.all_traffic(),
        "within elastic",
    )
    # ingress for elastic from elastic client
    self.elastic_security_group.connections.allow_from(
        elastic_client_security_group,
        ec2.Port.all_traffic(),
        "from elastic client",
    )
    # ingress for elastic client from elastic
    elastic_client_security_group.connections.allow_from(
        self.elastic_security_group,
        ec2.Port.all_traffic(),
        "from elastic",
    )

    # elastic policy: open to any principal; access is restricted to the
    # VPC by the domain's vpc_options below
    elastic_policy = iam.PolicyStatement(
        effect=iam.Effect.ALLOW,
        actions=[
            "es:*",
        ],
        resources=["*"],
    )
    elastic_policy.add_any_principal()
    elastic_document = iam.PolicyDocument()
    elastic_document.add_statements(elastic_policy)

    # cluster config, spread over three availability zones
    cluster_config = {
        "instanceCount": constants["ELASTIC_INSTANCE_COUNT"],
        "instanceType": constants["ELASTIC_INSTANCE"],
        "zoneAwarenessEnabled": True,
        "zoneAwarenessConfig": {
            "availabilityZoneCount": 3
        },
    }
    if constants["ELASTIC_DEDICATED_MASTER"]:
        cluster_config["dedicatedMasterEnabled"] = True
        cluster_config["dedicatedMasterType"] = constants[
            "ELASTIC_MASTER_INSTANCE"]
        cluster_config["dedicatedMasterCount"] = constants[
            "ELASTIC_MASTER_COUNT"]

    # create the elastic cluster
    self.elastic_domain = aes.CfnDomain(
        self,
        "elastic_domain",
        elasticsearch_cluster_config=cluster_config,
        elasticsearch_version=constants["ELASTIC_VERSION"],
        ebs_options={
            "ebsEnabled": True,
            "volumeSize": 10
        },
        vpc_options={
            "securityGroupIds":
            [self.elastic_security_group.security_group_id],
            "subnetIds": vpc_stack.get_vpc_private_subnet_ids,
        },
        access_policies=elastic_document,
        #log_publishing_options={"enabled": True},
        #cognito_options={"enabled": True},
    )
    core.Tags.of(self.elastic_domain).add("project",
                                          constants["PROJECT_TAG"])

    # instance for elasticsearch
    if client:
        # userdata for the elastic client
        elastic_userdata = user_data_init(
            log_group_name="elkk/elastic/instance")
        # create the instance
        elastic_instance = ec2.Instance(
            self,
            "elastic_client",
            instance_type=ec2.InstanceType(
                constants["ELASTIC_CLIENT_INSTANCE"]),
            machine_image=ec2.AmazonLinuxImage(
                generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
            vpc=vpc_stack.get_vpc,
            vpc_subnets={"subnet_type": ec2.SubnetType.PUBLIC},
            key_name=constants["KEY_PAIR"],
            security_group=elastic_client_security_group,
            user_data=elastic_userdata,
        )
        core.Tags.of(elastic_instance).add("project",
                                           constants["PROJECT_TAG"])
        # needs elastic domain to be available
        elastic_instance.node.add_dependency(self.elastic_domain)

        # create policies for EC2 to connect to Elastic
        access_elastic_policy = iam.PolicyStatement(
            effect=iam.Effect.ALLOW,
            actions=[
                "es:ListDomainNames",
                "es:DescribeElasticsearchDomain",
                "es:ESHttpPut",
            ],
            resources=["*"],
        )
        # add the role permissions
        elastic_instance.add_to_role_policy(
            statement=access_elastic_policy)
        # add log permissions
        instance_add_log_permissions(elastic_instance)
        # add the signal
        elastic_userdata.add_signal_on_exit_command(
            resource=elastic_instance)
        # add creation policy for instance
        elastic_instance.instance.cfn_options.creation_policy = core.CfnCreationPolicy(
            resource_signal=core.CfnResourceSignal(count=1, timeout="PT10M"))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Provision two cfn-init-managed web servers behind an HTTP listener.

    Creates:
      * an SSM parameter resolving the latest Amazon Linux AMI,
      * a CloudWatch log group the instances ship their logs to,
      * two EC2 instances (one per imported private subnet) bootstrapped
        via the cfn-init config sets Install / InstallLogs / Deploy and
        kept up to date by cfn-hup,
      * a target group over both instances and an HTTP listener that
        forwards to it on an imported load balancer.

    VPC, subnets, security group, instance profile/role and the load
    balancer ARN are imported from other stacks via ``Fn.import_value``.
    """
    super().__init__(scope, id, **kwargs)

    # Latest Amazon Linux AMI, resolved through SSM at deploy time.
    latest_ami = core.CfnParameter(
        self,
        "LatestAmiId",
        type="AWS::SSM::Parameter::Value<AWS::EC2::Image::Id>",
        default="/aws/service/ami-amazon-linux-latest/amzn-ami-hvm-x86_64-gp2",
    )

    # Deployment bucket name is derived from the account id by convention.
    source_bucket = "sourcebucketname%s" % (core.Aws.ACCOUNT_ID)

    # Log group that the awslogs agent on each instance writes to.
    cloudformation_logs = logs.LogGroup(
        self,
        "CloudFormationLogs",
        retention=logs.RetentionDays("ONE_WEEK"),
    )

    # Both web servers share identical bootstrap config; only the logical
    # id, Name tag and subnet differ, so they are built by one helper.
    web_instance_1 = self._create_web_instance(
        resource_name="WebInstance1",
        server_name="WebServer1",
        subnet_export="PrivateSubnet1",
        latest_ami=latest_ami,
        source_bucket=source_bucket,
        log_group_name=cloudformation_logs.log_group_name,
    )
    web_instance_2 = self._create_web_instance(
        resource_name="WebInstance2",
        server_name="WebServer2",
        subnet_export="PrivateSubnet2",
        latest_ami=latest_ami,
        source_bucket=source_bucket,
        log_group_name=cloudformation_logs.log_group_name,
    )

    # Target group spanning both instances; the short deregistration
    # delay keeps rolling deployments fast.
    default_target_group = elasticloadbalancingv2.CfnTargetGroup(
        self,
        "DefaultTargetGroup",
        health_check_interval_seconds=15,
        health_check_path="/",
        health_check_protocol="HTTP",
        health_check_timeout_seconds=10,
        healthy_threshold_count=2,
        unhealthy_threshold_count=2,
        matcher={"httpCode": "200-299"},
        port=80,
        protocol="HTTP",
        vpc_id=core.Fn.import_value("VPC"),
        target_group_attributes=[{
            "key": "deregistration_delay.timeout_seconds",
            "value": "30",
        }],
        targets=[
            {"id": web_instance_1.ref, "port": 80},
            {"id": web_instance_2.ref, "port": 80},
        ],
    )

    # Plain-HTTP listener on the imported load balancer.
    elasticloadbalancingv2.CfnListener(
        self,
        "HttpListener",
        default_actions=[{
            "type": "forward",
            "targetGroupArn": default_target_group.ref,
        }],
        load_balancer_arn=core.Fn.import_value("LoadBalancerArn"),
        port=80,
        protocol="HTTP",
    )

def _create_web_instance(self, resource_name, server_name, subnet_export,
                         latest_ami, source_bucket, log_group_name):
    """Create one cfn-init bootstrapped web instance in an imported subnet.

    :param resource_name: CloudFormation logical id of the instance; also
        the resource addressed by cfn-init, cfn-signal and cfn-hup.
    :param server_name: value for the instance's ``Name`` tag.
    :param subnet_export: name of the exported subnet id to launch into.
    :param latest_ami: the AMI-id SSM parameter (``CfnParameter``).
    :param source_bucket: S3 bucket holding the deployable application.
    :param log_group_name: CloudWatch log group for the awslogs agent.
    :return: the created ``ec2.CfnInstance``.
    """
    # User data runs cfn-init for this resource's config sets, then
    # signals the outcome so the creation policy below can gate on it.
    user_data = core.Fn.base64(
        """#!/bin/bash -ex
yum update -y
/opt/aws/bin/cfn-init -v --stack {StackName} --resource {Resource} --configsets InstallAndDeploy --region {Region}
# Signal the status from cfn-init (via $?)
/opt/aws/bin/cfn-signal -e $? --stack {StackName} --resource {Resource} --region {Region}
""".format(StackName=core.Aws.STACK_NAME,
           Resource=resource_name,
           Region=core.Aws.REGION))

    instance = ec2.CfnInstance(
        self,
        resource_name,
        additional_info=None,
        affinity=None,
        iam_instance_profile=core.Fn.import_value(
            "WebServerInstanceProfileOutput"),
        image_id=latest_ami.value_as_string,
        instance_type="t3.micro",
        network_interfaces=[{
            "deviceIndex": "0",
            "groupSet": [core.Fn.import_value("WebSecurityGroupOutput")],
            "subnetId": core.Fn.import_value(subnet_export),
        }],
        tags=[core.CfnTag(key="Name", value=server_name)],
        user_data=user_data,
    )
    instance.cfn_options.metadata = self._web_instance_metadata(
        resource_name, source_bucket, log_group_name)
    # Stack creation waits up to 10 minutes for the cfn-signal issued by
    # the user-data script above.
    instance.cfn_options.creation_policy = core.CfnCreationPolicy(
        resource_signal=core.CfnResourceSignal(timeout="PT10M"))
    return instance

def _web_instance_metadata(self, resource_name, source_bucket, log_group_name):
    """Build cfn-init metadata (S3 auth + Install/InstallLogs/Deploy).

    :param resource_name: logical id the cfn-hup reload hook must watch
        and re-run cfn-init against.
    :param source_bucket: bucket the Deploy config set pulls the app from.
    :param log_group_name: destination log group for every awslogs stream.
    :return: dict suitable for ``cfn_options.metadata``.
    """
    return {
        "AWS::CloudFormation::Authentication": {
            "rolebased": {
                "type": "S3",
                "buckets": [source_bucket],
                "roleName": core.Fn.import_value("WebServerRoleOutput"),
            }
        },
        "AWS::CloudFormation::Init": {
            "configSets": {
                "InstallAndDeploy": ["Install", "InstallLogs", "Deploy"]
            },
            "Install": {
                "packages": {
                    "yum": {
                        "python36": [],
                        "python36-devel": [],
                        "nginx": [],
                        "gcc": [],
                    }
                },
                "files": {
                    "/etc/cfn/cfn-hup.conf": {
                        "content": """
[main]
stack={}
region={}
interval=1
verbose=true""".format(core.Aws.STACK_ID, core.Aws.REGION),
                        "mode": "000400",
                        "owner": "root",
                        "group": "root",
                    },
                    # BUG FIX: the reload hook must watch and re-run
                    # cfn-init for THIS instance; the original hard-coded
                    # WebInstance1 in WebInstance2's hook as well, so
                    # instance 2 never reloaded on its own metadata updates.
                    "/etc/cfn/hooks.d/cfn-auto-reloader.conf": {
                        "content": """
[cfn-auto-reloader-hook]
triggers=post.update
path=Resources.{resource}.Metadata.AWS::CloudFormation::Init
action=/opt/aws/bin/cfn-init -v --stack {stack} --resource {resource} --configsets InstallAndDeploy --region {region}
runas=root""".format(resource=resource_name,
                     stack=core.Aws.STACK_NAME,
                     region=core.Aws.REGION),
                        "mode": "000400",
                        "owner": "root",
                        "group": "root",
                    },
                },
                "services": {
                    "sysvinit": {
                        "nginx": {
                            "enabled": "true",
                            "ensureRunning": "true",
                        },
                        "cfn-hup": {
                            "enabled": "true",
                            "ensureRunning": "true",
                            "files": [
                                "/etc/cfn/cfn-hup.conf",
                                "/etc/cfn/hooks.d/cfn-auto-reloader.conf",
                            ],
                        },
                    }
                },
                "commands": {
                    "01_unblock_nginx": {
                        "command": "chkconfig nginx on"
                    },
                    "02_install_xray": {
                        "command":
                        "curl https://s3.dualstack.us-east-2.amazonaws.com/aws-xray-assets.us-east-2/xray-daemon/aws-xray-daemon-3.x.rpm -o /tmp/xray.rpm && yum install -y /tmp/xray.rpm\n",
                        "cwd": "/tmp",
                        "ignoreErrors": "true",
                    },
                },
            },
            "InstallLogs": {
                "packages": {
                    "yum": {
                        "awslogs": []
                    }
                },
                "files": {
                    # BUG FIX: the original left a bare "%s" as the [yum]
                    # log_group_name while rendering with .format(), so the
                    # yum log shipped to a literal group named "%s".
                    # NOTE(review): [cloud-init-output] reuses the
                    # "cloud-init.log" stream suffix, as in the original —
                    # possibly intended to merge the two streams; confirm.
                    "/etc/awslogs/awslogs.conf": {
                        "content": """
[general]
state_file= /var/awslogs/state/agent-state
[yum]
file = /var/log/yum.log
log_group_name = {log_group}
log_stream_name = {{hostname}} - {{instance_id}} yum.log
[messages]
file = /var/log/messages
log_group_name = {log_group}
log_stream_name = {{hostname}} - {{instance_id}} messages.log
[cfn-hup]
file = /var/log/cfn-hup.log
log_group_name = {log_group}
log_stream_name = {{hostname}} - {{instance_id}} cfn-hup.log
[cfn-init]
file = /var/log/cfn-init.log
log_group_name = {log_group}
log_stream_name = {{hostname}} - {{instance_id}} cfn-init.log
[cfn-init-cmd]
file = /var/log/cfn-init-cmd.log
log_group_name = {log_group}
log_stream_name = {{hostname}} - {{instance_id}} cfn-init-cmd.log
[cloud-init]
file = /var/log/cloud-init.log
log_group_name = {log_group}
log_stream_name = {{hostname}} - {{instance_id}} cloud-init.log
[cloud-init-output]
file = /var/log/cloud-init-output.log
log_group_name = {log_group}
log_stream_name = {{hostname}} - {{instance_id}} cloud-init.log
[handler]
file = /var/log/handler.log
log_group_name = {log_group}
log_stream_name = {{hostname}} - {{instance_id}} handler.log
[uwsgi]
file = /var/log/uwsgi.log
log_group_name = {log_group}
log_stream_name = {{hostname}} - {{instance_id}} uwsgi.log
[nginx_access]
file = /var/log/nginx/access.log
log_group_name = {log_group}
log_stream_name = {{hostname}} - {{instance_id}} nginx_access.log
[nginx_error]
file = /var/log/nginx/error.log
log_group_name = {log_group}
log_stream_name = {{hostname}} - {{instance_id}} nginx_error.log
""".format(log_group=log_group_name),
                        "group": "root",
                        "owner": "root",
                        "mode": "000400",
                    },
                    "/etc/awslogs/awscli.conf": {
                        "content": """
[plugins]
cwlogs = cwlogs
[default]
region = {}
""".format(core.Aws.REGION),
                        "mode": "000444",
                        "owner": "root",
                        "group": "root",
                    },
                },
                "commands": {
                    "01_create_state_directory": {
                        "command": "mkdir -p /var/awslogs/state"
                    }
                },
                "services": {
                    "sysvinit": {
                        "awslogs": {
                            "enabled": "true",
                            "ensureRunning": "true",
                            "files": ["/etc/awslogs/awslogs.conf"],
                        }
                    }
                },
            },
            "Deploy": {
                "sources": {
                    "/photos":
                    "https://s3.amazonaws.com/{}/deploy-app.zip".format(
                        source_bucket)
                },
                "commands": {
                    "01_pip_uwsgi": {
                        "command": "pip-3.6 install uwsgi",
                        "cwd": "/photos",
                        "ignoreErrors": "false",
                    },
                    "02_pip_flask_app_requirements": {
                        "command": "pip-3.6 install -r requirements.txt",
                        "cwd": "/photos/FlaskApp",
                        "ignoreErrors": "false",
                    },
                    "03_stop_uwsgi": {
                        "command": "stop uwsgi",
                        "ignoreErrors": "true",
                    },
                    "04_stop_nginx": {
                        "command": "service nginx stop"
                    },
                    "05_copy_config": {
                        "command":
                        "mv -f nginx.conf /etc/nginx/nginx.conf && mv -f uwsgi.conf /etc/init/uwsgi.conf",
                        "cwd": "/photos/Deploy",
                        "ignoreErrors": "false",
                    },
                    "06_create_database": {
                        "command": "python3 database_create_tables.py",
                        "cwd": "/photos/Deploy",
                        "ignoreErrors": "false",
                    },
                    "07_start_uwsgi": {
                        "command": "start uwsgi"
                    },
                    "08_restart_nginx": {
                        "command": "service nginx start"
                    },
                },
            },
        },
    }