Example #1
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.vpc = aws_ec2.Vpc(self, "Vpc", cidr="10.0.0.0/16", nat_gateways=1)

        self.vpc.node.apply_aspect(core.Tag("app_name", "vpc"))
        self.vpc.node.apply_aspect(core.Tag("stack_type", "vpc"))
Example #2
    def __init__(self, scope: core.Construct, id: str, cidr_vpc, cidr_mask,
                 nat_gateways, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        # The code that defines your stack goes here

        self.vpc = ec2.Vpc(
            self,
            "VPC",
            max_azs=2,
            cidr=cidr_vpc,
            # This configuration creates 2 subnet groups across 2 AZs = 4 subnets.
            subnet_configuration=[
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PUBLIC,
                                        name="Public",
                                        cidr_mask=cidr_mask),
                ec2.SubnetConfiguration(subnet_type=ec2.SubnetType.PRIVATE,
                                        name="Private",
                                        cidr_mask=cidr_mask)
            ],
            # nat_gateway_provider=ec2.NatProvider.gateway(),
            nat_gateways=nat_gateways,
        )

        core.Tag(key="Owner", value="Wahaj-vpc")
        core.CfnOutput(self, "Output", value=self.vpc.vpc_id)
Example #3
    def __init__(self, scope: core.Construct, construct_id: str, vpc,
                 nlb_listener_port, nlb_name, nlb_id, internet_facing,
                 targetgroup_port, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)

        #network load balancer
        nlb = elb.NetworkLoadBalancer(
            self,
            internet_facing=internet_facing,
            load_balancer_name=nlb_name,
            id=nlb_id,
            vpc=vpc,  # the VPC object obtained from the lookup above
            vpc_subnets=ec2.SubnetSelection(subnets=vpc.public_subnets))

        # Load balancer security group
        sg_nlb = ec2.SecurityGroup(self,
                                   id="sg_nlb",
                                   vpc=vpc,
                                   security_group_name="sg_nlb")

        #listener
        listener = nlb.add_listener("Listener",
                                    port=nlb_listener_port,
                                    protocol=elb.Protocol.TCP)
        target_group = elb.NetworkTargetGroup(self,
                                              vpc=vpc,
                                              id="Target",
                                              port=targetgroup_port)
        listener.add_target_groups("TargetGroup", target_group)

        #sg_nlb ingress
        sg_nlb.add_ingress_rule(peer=ec2.Peer.ipv4("0.0.0.0/0"),
                                connection=ec2.Port.tcp(22))
        core.Tag(key="Owner", value="Wahaj-nlb")
Example #4
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        ZachESInstanceName = self.__class__.__name__
        DiskOpt={
          "EBSEnabled" : True,
          "VolumeSize" : 10,
          "VolumeType" : "gp2"
        }
        ESconfig = {
          "DedicatedMasterCount" : 3,
          "DedicatedMasterEnabled" : True,
          "DedicatedMasterType" : "m4.large.elasticsearch",
          "InstanceCount" : 3,
          "InstanceType" : "m4.large.elasticsearch",
          "ZoneAwarenessEnabled" : True
        }
        vpc = ec2.Vpc.from_lookup(self,id="Zach_VPC_Stack_A",vpc_id="vpc-01e73b4b5c6f9f98a")

        SubnetIDList,SGList=[],[]
        for subnet in vpc.public_subnets:
          SubnetIDList.append(subnet.subnet_id)

        vpc_options={"SubnetIds" :SubnetIDList,
                     "SecurityGroupIds" : []}

        '''
        EBS storage must be selected for m4.large.elasticsearch (Service: AWSElasticsearch; Status Code: 400; Error Code: ValidationException)
        '''
        ZachESInstance=es.CfnDomain(self, id=ZachESInstanceName,
                                    domain_name=ZachESInstanceName.replace("_","").lower(),
                                    ebs_options=DiskOpt,
                                    elasticsearch_version='7.1',
                                    snapshot_options={"AutomatedSnapshotStartHour":2},
                                    vpc_options=vpc_options,
                                    elasticsearch_cluster_config=ESconfig,
                                    tags=[core.Tag(key="Environment",value="prod"),
                                          core.Tag(key="Type",value="Log")],
                                    encryption_at_rest_options={"Enabled": True,"KmsKeyId": self.GenerateKmsKey(ZachESInstanceName)},
                                    node_to_node_encryption_options={"Enabled" : True},
                                    advanced_options={"indices.query.bool.max_clause_count":"2000"}
                                    )
        core.CfnOutput(self, ZachESInstanceName + "ID", value=ZachESInstance.logical_id)
        core.CfnOutput(self, ZachESInstanceName + "Domain", value=ZachESInstance.domain_name)
        core.CfnOutput(self, ZachESInstanceName + "Ver", value=ZachESInstance.elasticsearch_version)
        core.CfnOutput(self, ZachESInstanceName + "VPC", value=ZachESInstance.vpc_options.to_string())
Example #5
    def launch_function_config(self, emr_profile, cluster_configuration,
                               default_fail_if_cluster_running):

        return emr_launch_function.EMRLaunchFunction(
            self,
            self._cluster_name,
            namespace=self._cluster_name,
            launch_function_name="launch-fn",
            emr_profile=emr_profile,
            cluster_configuration=cluster_configuration,
            cluster_name=self._cluster_name,
            default_fail_if_cluster_running=default_fail_if_cluster_running,
            allowed_cluster_config_overrides=cluster_configuration.
            override_interfaces['default'],
            cluster_tags=[core.Tag(key='Group', value='AWSDemo')])
Example #6
    def from_json(self, property_values):
        self._launch_function_name = property_values['LaunchFunctionName']
        self._namespace = property_values['Namespace']

        profile_parts = property_values['EMRProfile'].split('/')
        self._emr_profile = emr_profile.EMRProfile.from_stored_profile(
            self, 'EMRProfile', profile_parts[1], profile_parts[0])
        config_parts = property_values['ClusterConfiguration'].split('/')
        self._cluster_configuration = cluster_configuration.ClusterConfiguration.from_stored_configuration(
            self, 'ClusterConfiguration', config_parts[1], config_parts[0])

        self._cluster_name = property_values['ClusterName']
        self._default_fail_if_cluster_running = property_values[
            'DefaultFailIfClusterRunning']

        topic = property_values.get('SuccessTopic', None)
        self._success_topic = sns.Topic.from_topic_arn(self, 'SuccessTopic', topic) \
            if topic is not None \
            else None

        topic = property_values.get('FailureTopic', None)
        self._failure_topic = sns.Topic.from_topic_arn(self, 'FailureTopic', topic) \
            if topic is not None \
            else None

        func = property_values.get('OverrideClusterConfigsLambda', None)
        self._override_cluster_configs_lambda = aws_lambda.Function.from_function_arn(
            self, 'OverrideClusterConfigsLambda', func) \
            if func is not None \
            else None

        self._allowed_cluster_config_overrides = property_values.get(
            'AllowedClusterConfigOverrides', None)
        self._description = property_values.get('Description', None)
        self._cluster_tags = [
            core.Tag(t['Key'], t['Value'])
            for t in property_values['ClusterTags']
        ]

        state_machine = property_values['StateMachine']
        self._state_machine = sfn.StateMachine.from_state_machine_arn(
            self, 'StateMachine', state_machine)

        self._wait_for_cluster_start = property_values.get(
            'WaitForClusterStart', None)
        return self
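For reference, the keys read by from_json above imply a stored document shaped roughly like the following (all values are illustrative placeholders, not taken from the source):

property_values = {
    'LaunchFunctionName': 'launch-fn',
    'Namespace': 'default',
    'EMRProfile': 'profile-namespace/profile-name',
    'ClusterConfiguration': 'configuration-namespace/configuration-name',
    'ClusterName': 'my-cluster',
    'DefaultFailIfClusterRunning': True,
    'ClusterTags': [{'Key': 'Group', 'Value': 'AWSDemo'}],
    'StateMachine': 'arn:aws:states:...',
    # optional keys read with .get(): 'SuccessTopic', 'FailureTopic',
    # 'OverrideClusterConfigsLambda', 'AllowedClusterConfigOverrides',
    # 'Description', 'WaitForClusterStart'
}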
Example #7
def test_load_cluster_configuration_builder():
    default_task_json = {
        'End': True,
        'Retry': [{
            'ErrorEquals': ['Lambda.ServiceException', 'Lambda.AWSLambdaException', 'Lambda.SdkClientException'],
            'IntervalSeconds': 2,
            'MaxAttempts': 6,
            'BackoffRate': 2
        }],
        'Type': 'Task',
        'Resource': {
            'Fn::GetAtt': ['testtaskLoadClusterConfiguration518ECBAD', 'Arn']
        },
        'Parameters': {
            'ClusterName': 'test-cluster',
            'ClusterTags': [{
                'Key': 'Key1',
                'Value': 'Value1'
            }],
            'ProfileNamespace': 'test',
            'ProfileName': 'test-profile',
            'ConfigurationNamespace': 'test',
            'ConfigurationName': 'test-configuration'
        }
    }

    stack = core.Stack(core.App(), 'test-stack')

    task = emr_tasks.LoadClusterConfigurationBuilder.build(
        stack, 'test-task',
        cluster_name='test-cluster',
        cluster_tags=[core.Tag('Key1', 'Value1')],
        profile_namespace='test',
        profile_name='test-profile',
        configuration_namespace='test',
        configuration_name='test-configuration',
    )

    print_and_assert(default_task_json, task)
Example #8
def test_load_cluster_configuration_builder():
    default_task_json = {
        'End': True,
        'OutputPath': '$',
        'Parameters': {
            'ClusterName': 'test-cluster',
            'ClusterTags': [{
                'Key': 'Key1',
                'Value': 'Value1'
            }],
            'ConfigurationName': 'test-configuration',
            'ConfigurationNamespace': 'test',
            'ProfileName': 'test-profile',
            'ProfileNamespace': 'test'
        },
        'Resource': {
            'Fn::GetAtt': ['testtaskLoadClusterConfiguration518ECBAD', 'Arn']
        },
        'ResultPath': '$.ClusterConfiguration',
        'Type': 'Task'
    }

    stack = core.Stack(core.App(), 'test-stack')

    task = emr_tasks.LoadClusterConfigurationBuilder.build(
        stack,
        'test-task',
        cluster_name='test-cluster',
        cluster_tags=[core.Tag('Key1', 'Value1')],
        profile_namespace='test',
        profile_name='test-profile',
        configuration_namespace='test',
        configuration_name='test-configuration',
    )

    print_and_assert(default_task_json, task)
Example #9
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        core.Tag("Project", "BeerTracker")

        beerdb = _dynamo.Table(
            self,
            'beers',
            table_name="Beers",
            billing_mode=_dynamo.BillingMode.PAY_PER_REQUEST,
            partition_key=_dynamo.Attribute(name="LocationName",
                                            type=_dynamo.AttributeType.STRING),
            sort_key=_dynamo.Attribute(name="BeerName",
                                       type=_dynamo.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY)

        locationdb = _dynamo.Table(
            self,
            'locations',
            table_name="Locations",
            billing_mode=_dynamo.BillingMode.PAY_PER_REQUEST,
            partition_key=_dynamo.Attribute(name="LocationName",
                                            type=_dynamo.AttributeType.STRING),
            removal_policy=core.RemovalPolicy.DESTROY)
Example #10
    def __init__(
            self,
            scope: core.Construct,
            id: str,
            *,
            launch_function_name: str,
            emr_profile: emr_profile.EMRProfile,
            cluster_configuration: cluster_configuration.ClusterConfiguration,
            cluster_name: str = None,
            namespace: str = 'default',
            default_fail_if_cluster_running: bool = False,
            success_topic: Optional[sns.Topic] = None,
            failure_topic: Optional[sns.Topic] = None,
            override_cluster_configs_lambda: Optional[
                aws_lambda.Function] = None,
            allowed_cluster_config_overrides: Optional[Dict[str,
                                                            Dict[str,
                                                                 str]]] = None,
            description: Optional[str] = None,
            cluster_tags: Union[List[core.Tag], Dict[str, str], None] = None,
            wait_for_cluster_start: bool = True) -> None:
        super().__init__(scope, id)

        if launch_function_name is None:
            return

        self._launch_function_name = launch_function_name
        self._namespace = namespace
        self._emr_profile = emr_profile
        self._cluster_configuration = cluster_configuration
        self._cluster_name = cluster_name
        self._default_fail_if_cluster_running = default_fail_if_cluster_running
        self._success_topic = success_topic
        self._failure_topic = failure_topic
        self._override_cluster_configs_lambda = override_cluster_configs_lambda
        self._description = description
        self._wait_for_cluster_start = wait_for_cluster_start

        if allowed_cluster_config_overrides is None:
            self._allowed_cluster_config_overrides = cluster_configuration.override_interfaces.get(
                'default', None)
        else:
            self._allowed_cluster_config_overrides = allowed_cluster_config_overrides

        if isinstance(cluster_tags, dict):
            self._cluster_tags = [
                core.Tag(k, v) for k, v in cluster_tags.items()
            ]
        elif isinstance(cluster_tags, list):
            self._cluster_tags = cluster_tags
        else:
            self._cluster_tags = []

        self._cluster_tags.extend([
            core.Tag('deployment:product:name', __product__),
            core.Tag('deployment:product:version', __version__)
        ])

        if len(cluster_configuration.configuration_artifacts) > 0:
            if emr_profile.mutable_instance_role:
                for i in range(
                        len(cluster_configuration.configuration_artifacts)):
                    configuration_artifact = cluster_configuration.configuration_artifacts[
                        i]
                    bucket_name = configuration_artifact['Bucket']
                    path = configuration_artifact['Path']
                    bucket = s3.Bucket.from_bucket_name(
                        self, f'Bucket_{i}', bucket_name)
                    bucket.grant_read(emr_profile.roles.instance_role, path)
            else:
                logger.warn(
                    '--------------------------------------------------------------------------'
                )
                logger.warn(
                    'Unable to authorize the artifacts in the ClusterConfiguration'
                )
                logger.warn(
                    f'The EMRProfile {emr_profile.profile_name} has an immutable Instance Role'
                )
                logger.warn(
                    'Use of these artifacts will require direct authorization on the EMRProfile'
                )
                logger.warn(
                    '--------------------------------------------------------------------------'
                )

        fail = emr_chains.Fail(
            self,
            'FailChain',
            message=sfn.TaskInput.from_data_at('$.Error'),
            subject='EMR Launch Function Failure',
            topic=failure_topic,
            error='Failed to Launch Cluster',
            cause=
            'See Execution Event "FailStateEntered" for complete error cause')

        # Create Task for loading the cluster configuration from Parameter Store
        load_cluster_configuration = emr_tasks.LoadClusterConfigurationBuilder.build(
            self,
            'LoadClusterConfigurationTask',
            cluster_name=cluster_name,
            cluster_tags=self._cluster_tags,
            profile_namespace=emr_profile.namespace,
            profile_name=emr_profile.profile_name,
            configuration_namespace=cluster_configuration.namespace,
            configuration_name=cluster_configuration.configuration_name,
            result_path='$.ClusterConfiguration',
        )
        load_cluster_configuration.add_catch(fail,
                                             errors=['States.ALL'],
                                             result_path='$.Error')

        # Create Task for overriding cluster configurations
        override_cluster_configs = emr_tasks.OverrideClusterConfigsBuilder.build(
            self,
            'OverrideClusterConfigsTask',
            override_cluster_configs_lambda=override_cluster_configs_lambda,
            allowed_cluster_config_overrides=self.
            _allowed_cluster_config_overrides,
            input_path='$.ClusterConfiguration.Cluster',
            result_path='$.ClusterConfiguration.Cluster',
        )
        # Attach an error catch to the Task
        override_cluster_configs.add_catch(fail,
                                           errors=['States.ALL'],
                                           result_path='$.Error')

        # Create Task to conditionally fail if a cluster with this name is already
        # running, based on user input
        fail_if_cluster_running = emr_tasks.FailIfClusterRunningBuilder.build(
            self,
            'FailIfClusterRunningTask',
            default_fail_if_cluster_running=default_fail_if_cluster_running,
            input_path='$.ClusterConfiguration.Cluster',
            result_path='$.ClusterConfiguration.Cluster',
        )
        # Attach an error catch to the task
        fail_if_cluster_running.add_catch(fail,
                                          errors=['States.ALL'],
                                          result_path='$.Error')

        # Create a Task for updating the cluster tags at runtime
        update_cluster_tags = emr_tasks.UpdateClusterTagsBuilder.build(
            self,
            'UpdateClusterTagsTask',
            input_path='$.ClusterConfiguration.Cluster',
            result_path='$.ClusterConfiguration.Cluster',
        )
        # Attach an error catch to the Task
        update_cluster_tags.add_catch(fail,
                                      errors=['States.ALL'],
                                      result_path='$.Error')

        # Create a Task to create the cluster
        if cluster_configuration.secret_configurations is None and emr_profile.kerberos_attributes_secret is None:
            # Use the standard Step Functions/EMR integration to create the cluster
            create_cluster = emr_tasks.CreateClusterBuilder.build(
                self,
                'CreateClusterTask',
                roles=emr_profile.roles,
                input_path='$.ClusterConfiguration.Cluster',
                result_path='$.LaunchClusterResult',
                wait_for_cluster_start=wait_for_cluster_start,
            )
        else:
            # Use the RunJobFlow Lambda to create the cluster to avoid exposing the
            # SecretConfigurations and KerberosAttributes values
            create_cluster = emr_tasks.RunJobFlowBuilder.build(
                self,
                'CreateClusterTask',
                roles=emr_profile.roles,
                kerberos_attributes_secret=emr_profile.
                kerberos_attributes_secret,
                secret_configurations=cluster_configuration.
                secret_configurations,
                input_path='$.ClusterConfiguration',
                result_path='$.LaunchClusterResult',
                wait_for_cluster_start=wait_for_cluster_start,
            )

        # Attach an error catch to the Task
        create_cluster.add_catch(fail,
                                 errors=['States.ALL'],
                                 result_path='$.Error')

        success = emr_chains.Success(
            self,
            'SuccessChain',
            message=sfn.TaskInput.from_data_at('$.LaunchClusterResult'),
            subject='Launch EMR Config Succeeded',
            topic=success_topic,
            output_path='$')

        definition = sfn.Chain \
            .start(load_cluster_configuration) \
            .next(override_cluster_configs) \
            .next(fail_if_cluster_running) \
            .next(update_cluster_tags) \
            .next(create_cluster) \
            .next(success)

        self._state_machine = sfn.StateMachine(
            self,
            'StateMachine',
            state_machine_name=f'{namespace}_{launch_function_name}',
            definition=definition)

        self._ssm_parameter = ssm.CfnParameter(
            self,
            'SSMParameter',
            type='String',
            value=json.dumps(self.to_json()),
            tier='Intelligent-Tiering',
            name=f'{SSM_PARAMETER_PREFIX}/{namespace}/{launch_function_name}')
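The SSM parameter written above stores json.dumps(self.to_json()); Example #6's from_json is the reverse operation. Purely as a hedged illustration (assuming boto3 is available and SSM_PARAMETER_PREFIX matches the value used here), the stored document could be read back like this:

import json
import boto3

ssm_client = boto3.client('ssm')
response = ssm_client.get_parameter(
    Name=f'{SSM_PARAMETER_PREFIX}/default/launch-fn')
property_values = json.loads(response['Parameter']['Value'])
# property_values can then be handed to from_json() as shown in Example #6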
Example #11
environment_name = f"{os.environ.get('ENVIRONMENT', 'dev')}"
base_domain_name = os.environ.get("DOMAIN_NAME", "mysite.com")
# If the production environment subdomain should not be included in the URL,
# redefine `full_domain_name` to `base_domain_name` for that environment
full_domain_name = f"{environment_name}.{base_domain_name}"  # dev.mysite.com
if environment_name == "app":
    full_domain_name = base_domain_name
base_app_name = os.environ.get("APP_NAME", "mysite-com")
full_app_name = f"{environment_name}-{base_app_name}"  # dev-mysite-com
aws_region = os.environ.get("AWS_DEFAULT_REGION", "us-east-1")

app = core.App()
stack = ApplicationStack(
    app,
    f"{full_app_name}-stack",
    environment_name=environment_name,
    base_domain_name=base_domain_name,
    full_domain_name=full_domain_name,
    base_app_name=base_app_name,
    full_app_name=full_app_name,
    env={"region": aws_region},
)

# in order to be able to tag ECS resources, you need to go to
# the ECS Console > Account Settings > Amazon ECS ARN and resource ID settings
# and enable at least Service and Task. Optionally enable
# CloudWatch Container Insights
stack.node.apply_aspect(core.Tag("StackName", full_app_name))

app.synth()
Example #12
from aws_cdk import (
    aws_apigateway as apigateway,
    aws_ec2 as ec2,
    aws_lambda as lambda_,
    core,
)

class LambdaNumeralsStack(core.Stack):
    def __init__(self, app: core.App, id: str) -> None:
        super().__init__(app, id)

        vpc = ec2.Vpc(self, "NumeralsVpc")

        lambdaFn = lambda_.Function(
            self, "NumeralConverterHandler",
            code=lambda_.Code.asset('lambda'),
            handler="numeral-converter.main",
            timeout=core.Duration.seconds(300),
            runtime=lambda_.Runtime.PYTHON_3_7,
            vpc=vpc,
        )

        api = apigateway.LambdaRestApi(
            self, "lambdaNumerals", handler=lambdaFn, proxy=False,)

        items = api.root.add_resource('numerals')
        item = items.add_resource('{numerals+}')
        item.add_method('GET')

app = core.App()
stack = LambdaNumeralsStack(app, "LambdaNumerals")
stack.node.apply_aspect(core.Tag('Owner','Gregg Anderson'))
app.synth()
Example #13
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        eks_vpc = ec2.Vpc(self, "VPC", cidr="10.0.0.0/16")

        self.node.apply_aspect(
            core.Tag("kubernetes.io/cluster/cluster", "shared"))

        eks_vpc.private_subnets[0].node.apply_aspect(
            core.Tag("kubernetes.io/role/internal-elb", "1"))
        eks_vpc.private_subnets[1].node.apply_aspect(
            core.Tag("kubernetes.io/role/internal-elb", "1"))
        eks_vpc.public_subnets[0].node.apply_aspect(
            core.Tag("kubernetes.io/role/elb", "1"))
        eks_vpc.public_subnets[1].node.apply_aspect(
            core.Tag("kubernetes.io/role/elb", "1"))

        # Create IAM Role For CodeBuild and Cloud9
        codebuild_role = iam.Role(
            self,
            "BuildRole",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("codebuild.amazonaws.com"),
                iam.ServicePrincipal("ec2.amazonaws.com")),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AdministratorAccess")
            ])

        instance_profile = iam.CfnInstanceProfile(
            self, "InstanceProfile", roles=[codebuild_role.role_name])

        # Create CodeBuild PipelineProject
        build_project = codebuild.PipelineProject(
            self,
            "BuildProject",
            role=codebuild_role,
            build_spec=codebuild.BuildSpec.from_source_filename(
                "aws-infrastructure/buildspec.yml"))

        # Create CodePipeline
        pipeline = codepipeline.Pipeline(
            self,
            "Pipeline",
        )

        # Create Artifact
        artifact = codepipeline.Artifact()

        # Add Source Stage
        pipeline.add_stage(
            stage_name="Source",
            actions=[
                codepipeline_actions.GitHubSourceAction(
                    action_name="SourceCodeRepo",
                    owner="jasonumiker",
                    repo="k8s-plus-aws-gitops",
                    output=artifact,
                    oauth_token=core.SecretValue.secrets_manager(
                        "github-token"),
                    trigger=codepipeline_actions.GitHubTrigger.NONE)
            ])

        # Add CodeBuild Stage
        pipeline.add_stage(
            stage_name="Deploy",
            actions=[
                codepipeline_actions.CodeBuildAction(
                    action_name="CodeBuildProject",
                    project=build_project,
                    type=codepipeline_actions.CodeBuildActionType.BUILD,
                    input=artifact,
                    environment_variables={
                        'PublicSubnet1ID':
                        codebuild.BuildEnvironmentVariable(
                            value=eks_vpc.public_subnets[0].subnet_id),
                        'PublicSubnet2ID':
                        codebuild.BuildEnvironmentVariable(
                            value=eks_vpc.public_subnets[1].subnet_id),
                        'PrivateSubnet1ID':
                        codebuild.BuildEnvironmentVariable(
                            value=eks_vpc.private_subnets[0].subnet_id),
                        'PrivateSubnet2ID':
                        codebuild.BuildEnvironmentVariable(
                            value=eks_vpc.private_subnets[1].subnet_id),
                        'AWS_DEFAULT_REGION':
                        codebuild.BuildEnvironmentVariable(value=self.region),
                        'INSTANCEPROFILEID':
                        codebuild.BuildEnvironmentVariable(
                            value=instance_profile.ref)
                    })
            ])

        cloud9_repository = cloud9.CfnEnvironmentEC2.RepositoryProperty(
            path_component="k8s-plus-aws-gitops",
            repository_url="https://github.com/jasonumiker/k8s-plus-aws-gitops"
        )

        cloud9_instance = cloud9.CfnEnvironmentEC2(
            self,
            'Cloud9Instance',
            instance_type="t2.micro",
            automatic_stop_time_minutes=30,
            subnet_id=eks_vpc.public_subnets[0].subnet_id,
            repositories=[cloud9_repository])

        pipeline.node.add_dependency(eks_vpc)
        pipeline.node.add_dependency(cloud9_instance)
        cloud9_instance.node.add_dependency(eks_vpc)
Example #14
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        self.vpc = ec2.Vpc(
            self,
            id="VPC",
            cidr="10.0.0.0/16",
            max_azs=2,
            nat_gateways=1,
            subnet_configuration=[
                ec2.SubnetConfiguration(name="public",
                                        cidr_mask=24,
                                        reserved=False,
                                        subnet_type=ec2.SubnetType.PUBLIC),
                ec2.SubnetConfiguration(name="private",
                                        cidr_mask=24,
                                        reserved=False,
                                        subnet_type=ec2.SubnetType.PRIVATE),
                ec2.SubnetConfiguration(name="DB",
                                        cidr_mask=24,
                                        reserved=False,
                                        subnet_type=ec2.SubnetType.ISOLATED),
                # ec2.SubnetConfiguration(
                #     name="DB2", cidr_mask=24,
                #     reserved=False, subnet_type=ec2.SubnetType.ISOLATED
                # )
            ],
            enable_dns_hostnames=True,
            enable_dns_support=True)

        core.Tag(key="Application", value=self.stack_name) \
            .add(self.vpc, key="Application", value=self.stack_name)
        # core.Tag("Network", "Public").add(vpc)
        # core.Tag("Name", "VPCName-Environment").add(vpc)
        # core.Tag("Environment", "Environment").add(vpc)

        bastion = ec2.BastionHostLinux(
            self,
            id="BastionHost",
            vpc=self.vpc,
            instance_name="BastionHost",
            instance_type=ec2.InstanceType(ec2_type),
            subnet_selection=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PUBLIC))
        bastion.allow_ssh_access_from(ec2.Peer.any_ipv4())

        # Setup key_name for EC2 instance login if you don't use Session Manager
        #bastion.instance.instance.add_property_override("KeyName", key_name)

        ec2.CfnEIP(self,
                   id="BastionHostEIP",
                   domain="vpc",
                   instance_id=bastion.instance_id)

        core.CfnOutput(
            self,
            id="VPCId",
            value=self.vpc.vpc_id,
            description="VPC ID",
            export_name=f"{self.region}:{self.account}:{self.stack_name}:vpc-id"
        )

        core.CfnOutput(
            self,
            id="BastionPrivateIP",
            value=bastion.instance_private_ip,
            description="BASTION Private IP",
            export_name=
            f"{self.region}:{self.account}:{self.stack_name}:bastion-private-ip"
        )

        core.CfnOutput(
            self,
            id="BastionPublicIP",
            value=bastion.instance_public_ip,
            description="BASTION Public IP",
            export_name=
            f"{self.region}:{self.account}:{self.stack_name}:bastion-public-ip"
        )
Example #15
    def __init__(self, scope: core.Construct, id: str, vpc_ip: str,
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        if vpc_ip:
            print("VPC_IP " + vpc_ip)
            cidr = vpc_ip
        else:
            cidr = "10.0.0.0/16"

        # 01. Create the VPC
        cfVpc = ec2.CfnVPC(self,
                           "VPC",
                           cidr_block=cidr,
                           tags=[core.Tag(key="Name", value="CDKVPC")])

        # 02. Create the subnets
        subnet_2a = ec2.CfnSubnet(
            self,
            id="subnet_2a",
            availability_zone="ap-northeast-2a",
            cidr_block="100.0.1.0/24",
            map_public_ip_on_launch=True,
            vpc_id=cfVpc.ref,
            tags=[core.Tag(key="Name", value="subnet-2a")])

        subnet_2c = ec2.CfnSubnet(
            self,
            id="subnet_2c",
            availability_zone="ap-northeast-2c",
            cidr_block="100.0.2.0/24",
            map_public_ip_on_launch=True,
            vpc_id=cfVpc.ref,
            tags=[core.Tag(key="Name", value="subnet-2c")])

        # 03. Create the Internet Gateway
        internet_gateway = ec2.CfnInternetGateway(
            self,
            id="Internet_Gateway_DNS_Example",
            tags=[core.Tag(key="Name", value="Internet_Gateway_for_DNS")])

        # 04. Attach the Internet Gateway
        ec2.CfnVPCGatewayAttachment(self,
                                    id="vpcgw",
                                    vpc_id=cfVpc.ref,
                                    internet_gateway_id=internet_gateway.ref)

        # 05. Create the route table
        route_table = ec2.CfnRouteTable(
            self,
            id="dns_example_routetable",
            vpc_id=cfVpc.ref,
            tags=[core.Tag(key="Name", value="Route_for_DNS")])

        #Route
        ec2.CfnRoute(self,
                     id="IGW_Route",
                     route_table_id=route_table.ref,
                     destination_cidr_block="0.0.0.0/0",
                     gateway_id=internet_gateway.ref)

        ec2.CfnSubnetRouteTableAssociation(self,
                                           id="DnsSubnet_Associate_2a",
                                           route_table_id=route_table.ref,
                                           subnet_id=subnet_2a.ref)

        ec2.CfnSubnetRouteTableAssociation(self,
                                           id="DnsSubnet_Associate_2c",
                                           route_table_id=route_table.ref,
                                           subnet_id=subnet_2c.ref)

        # 06. Create the security group
        sg = ec2.CfnSecurityGroup(self,
                                  id="sg-ssh",
                                  vpc_id=cfVpc.ref,
                                  group_description="Default Group",
                                  tags=[core.Tag(key="Name", value="DNS_SG")])
        #security_group_ingress=[ingress_ssh])
        #security_group_egress=[egress_all])

        ingress_ssh = ec2.CfnSecurityGroupIngress(self,
                                                  "SSH",
                                                  ip_protocol="tcp",
                                                  group_id=sg.ref,
                                                  from_port=22,
                                                  to_port=22,
                                                  cidr_ip="0.0.0.0/0")

        egress_all = ec2.CfnSecurityGroupEgress(
            self,
            id="OUTBOUND",
            group_id=sg.ref,
            ip_protocol="-1",
            #from_port=0,
            #to_port=65535,
            cidr_ip="0.0.0.0/0")

        # 07. Create the DNS server EC2 instances
        dns_server = ec2.MachineImage.generic_linux(
            {"ap-northeast-2": "ami-00d293396a942208d"})

        ec2.CfnInstance(self,
                        id="dns_master",
                        image_id=dns_server.get_image(self).image_id,
                        instance_type="t2.small",
                        key_name="SeoulRegion",
                        security_group_ids=[sg.ref],
                        subnet_id=subnet_2a.ref,
                        tags=[{
                            "key": "Name",
                            "value": "dns_master"
                        }])

        ec2.CfnInstance(self,
                        id="dns_slave",
                        image_id=dns_server.get_image(self).image_id,
                        instance_type="t2.small",
                        key_name="SeoulRegion",
                        security_group_ids=[sg.ref],
                        subnet_id=subnet_2c.ref,
                        tags=[{
                            "key": "Name",
                            "value": "dns_slave"
                        }])
Example #16
    def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
        super().__init__(scope, id, **kwargs)

        eks_vpc = ec2.Vpc(self, "VPC", cidr="10.0.0.0/16")

        self.node.apply_aspect(
            core.Tag("kubernetes.io/cluster/cluster", "shared"))

        eks_vpc.private_subnets[0].node.apply_aspect(
            core.Tag("kubernetes.io/role/internal-elb", "1"))
        eks_vpc.private_subnets[1].node.apply_aspect(
            core.Tag("kubernetes.io/role/internal-elb", "1"))
        eks_vpc.public_subnets[0].node.apply_aspect(
            core.Tag("kubernetes.io/role/elb", "1"))
        eks_vpc.public_subnets[1].node.apply_aspect(
            core.Tag("kubernetes.io/role/elb", "1"))

        # Create IAM Role For CodeBuild and Cloud9
        codebuild_role = iam.Role(
            self,
            "BuildRole",
            assumed_by=iam.CompositePrincipal(
                iam.ServicePrincipal("codebuild.amazonaws.com"),
                iam.ServicePrincipal("ec2.amazonaws.com")),
            managed_policies=[
                iam.ManagedPolicy.from_aws_managed_policy_name(
                    "AdministratorAccess")
            ])

        instance_profile = iam.CfnInstanceProfile(
            self, "InstanceProfile", roles=[codebuild_role.role_name])

        # Create CodeBuild PipelineProject
        build_project = codebuild.PipelineProject(
            self,
            "BuildProject",
            role=codebuild_role,
            build_spec=codebuild.BuildSpec.from_source_filename(
                "buildspec.yml"))

        # Create CodePipeline
        pipeline = codepipeline.Pipeline(
            self,
            "Pipeline",
        )

        # Create Artifact
        artifact = codepipeline.Artifact()

        # S3 Source Bucket
        source_bucket = s3.Bucket.from_bucket_attributes(
            self,
            "SourceBucket",
            bucket_arn=core.Fn.join(
                "",
                ["arn:aws:s3:::ee-assets-prod-",
                 core.Fn.ref("AWS::Region")]))

        # Add Source Stage
        pipeline.add_stage(
            stage_name="Source",
            actions=[
                codepipeline_actions.S3SourceAction(
                    action_name="S3SourceRepo",
                    bucket=source_bucket,
                    bucket_key=
                    "modules/2cae1f20008d4fc5aaef294602649b98/v9/source.zip",
                    output=artifact,
                    trigger=codepipeline_actions.S3Trigger.NONE)
            ])

        # Add CodeBuild Stage
        pipeline.add_stage(
            stage_name="Deploy",
            actions=[
                codepipeline_actions.CodeBuildAction(
                    action_name="CodeBuildProject",
                    project=build_project,
                    type=codepipeline_actions.CodeBuildActionType.BUILD,
                    input=artifact,
                    environment_variables={
                        'PublicSubnet1ID':
                        codebuild.BuildEnvironmentVariable(
                            value=eks_vpc.public_subnets[0].subnet_id),
                        'PublicSubnet2ID':
                        codebuild.BuildEnvironmentVariable(
                            value=eks_vpc.public_subnets[1].subnet_id),
                        'PrivateSubnet1ID':
                        codebuild.BuildEnvironmentVariable(
                            value=eks_vpc.private_subnets[0].subnet_id),
                        'PrivateSubnet2ID':
                        codebuild.BuildEnvironmentVariable(
                            value=eks_vpc.private_subnets[1].subnet_id),
                        'AWS_DEFAULT_REGION':
                        codebuild.BuildEnvironmentVariable(value=self.region),
                        'INSTANCEPROFILEID':
                        codebuild.BuildEnvironmentVariable(
                            value=instance_profile.ref),
                        'AWS_ACCOUNT_ID':
                        codebuild.BuildEnvironmentVariable(value=self.account)
                    })
            ])

        cloud9_stack = cloudformation.CfnStack(
            self,
            "Cloud9Stack",
            #            template_url="https://aws-quickstart.s3.amazonaws.com/quickstart-cloud9-ide/templates/cloud9-ide-instance.yaml",
            template_url=
            "https://ee-assets-prod-us-east-1.s3.amazonaws.com/modules/2cae1f20008d4fc5aaef294602649b98/v9/cloud9-ide-instance.yaml",
            parameters={
                "C9InstanceType": "m5.large",
                "C9Subnet": eks_vpc.public_subnets[0].subnet_id
            })

        pipeline.node.add_dependency(eks_vpc)
        pipeline.node.add_dependency(cloud9_stack)
Example #17
                       account=os.environ["CDK_DEFAULT_ACCOUNT"],
                       region=os.environ["CDK_DEFAULT_REGION"]))

# Load our SSE-KMS EMR Profile created in the emr_profiles example
sse_kms_profile = emr_profile.EMRProfile.from_stored_profile(
    stack, 'EMRProfile', 'sse-kms-profile')

# Load our Basic Cluster Configuration created in the cluster_configurations example
cluster_config = cluster_configuration.ClusterConfiguration.from_stored_configuration(
    stack, 'ClusterConfiguration', 'basic-instance-group-cluster')

# Create a new State Machine to launch a cluster with the Basic configuration
# Unless specifically indicated, fail to start if a cluster of the same name
# is already running. Allow any parameter in the default override_interface to
# be overwritten.
launch_function = emr_launch_function.EMRLaunchFunction(
    stack,
    'EMRLaunchFunction',
    launch_function_name='launch-basic-cluster',
    cluster_configuration=cluster_config,
    emr_profile=sse_kms_profile,
    cluster_name='basic-cluster',
    default_fail_if_cluster_running=True,
    allowed_cluster_config_overrides=cluster_config.
    override_interfaces['default'],
    cluster_tags=[core.Tag('Key1', 'Value1'),
                  core.Tag('Key2', 'Value2')],
    wait_for_cluster_start=True)

app.synth()