def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Stack with a 2-AZ VPC plus one private and one public EC2 instance.

    The NAT gateways created alongside the public subnets let the private
    instance reach the internet for `yum update` and SSM traffic.
    """
    super().__init__(scope, id, **kwargs)

    # VPC with public and private subnets in 2 AZs; this also creates
    # NAT gateways in the public subnets.
    vpc = ec2.Vpc(self, "NAT_Vpc", max_azs=2)

    # IAM role that allows the EC2 instances to communicate with SSM.
    role = iam.Role(self, "Role",
                    assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))

    # FIX: the original created a brand-new customer managed policy *named*
    # 'AmazonSSMManagedInstanceCore' whose single statement granted '*' on
    # '*' (full administrator access). Attach the real AWS-managed SSM
    # policy (arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore) instead.
    role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AmazonSSMManagedInstanceCore'))

    # User data: update server software, then drop a world-writable
    # metadata.sh helper that curls the instance metadata service.
    ssma_user_data = ec2.UserData.for_linux()
    ssma_user_data.add_commands('sudo yum update -y')
    ssma_user_data.add_commands('sudo touch metadata.sh')
    ssma_user_data.add_commands('sudo chmod 777 metadata.sh')
    ssma_user_data.add_commands(
        "sudo echo 'curl http://169.254.169.254/latest/meta-data/$1' > metadata.sh"
    )
    ssma_user_data.add_commands("sudo echo 'VAR=' >> metadata.sh")
    ssma_user_data.add_commands("sudo echo 'echo $VAR' >> metadata.sh")

    # EC2 instance in one of the private subnets.
    # (FIX: distinct variable names; both instances previously shadowed
    # one `instance` local.)
    private_instance = ec2.Instance(
        self, "PrivateInstance",
        vpc=vpc,
        instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                          ec2.InstanceSize.MICRO),
        machine_image=ec2.AmazonLinuxImage(),
        vpc_subnets={'subnet_type': ec2.SubnetType.PRIVATE},
        role=role,
        user_data=ssma_user_data)

    # EC2 instance in one of the public subnets.
    public_instance = ec2.Instance(
        self, "PublicInstance",
        vpc=vpc,
        instance_type=ec2.InstanceType.of(ec2.InstanceClass.BURSTABLE2,
                                          ec2.InstanceSize.MICRO),
        machine_image=ec2.AmazonLinuxImage(),
        vpc_subnets={'subnet_type': ec2.SubnetType.PUBLIC},
        role=role,
        user_data=ssma_user_data)
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """Provision Linux/Windows bastion hosts and a Radius host.

    ``props`` must supply 'vpc', 'internal_sg' and 'bastion_sg'.
    A copy of the unmodified props dict is exposed as ``self.output_props``.
    """
    super().__init__(scope, id, **kwargs)

    vpc = props['vpc']
    internal_sg = props['internal_sg']
    bastion_sg = props['bastion_sg']
    key_name = self.node.try_get_context('key_name')
    public_subnets = ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC)
    ssm_policy = iam.ManagedPolicy.from_aws_managed_policy_name(
        'AmazonSSMManagedInstanceCore')

    # Linux bastion host in a public subnet.
    bastion_linux = ec2.Instance(
        self, 'BastionLinux',
        instance_type=ec2.InstanceType('t3.micro'),
        machine_image=ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
        key_name=key_name,
        vpc=vpc,
        vpc_subnets=public_subnets,
        security_group=internal_sg)
    bastion_linux.role.add_managed_policy(ssm_policy)
    bastion_linux.add_security_group(bastion_sg)

    # Windows bastion host in a public subnet.
    bastion_windows = ec2.Instance(
        self, 'BastionWindows',
        instance_type=ec2.InstanceType('t3.large'),
        machine_image=ec2.MachineImage.latest_windows(
            version=ec2.WindowsVersion.WINDOWS_SERVER_2016_JAPANESE_FULL_BASE),
        key_name=key_name,
        vpc=vpc,
        vpc_subnets=public_subnets,
        security_group=internal_sg)
    bastion_windows.role.add_managed_policy(ssm_policy)
    bastion_windows.add_security_group(bastion_sg)

    # Radius EC2 host in a private subnet (internal SG only, no bastion SG).
    radius_host = ec2.Instance(
        self, 'RadiusHost',
        instance_type=ec2.InstanceType('t3.small'),
        machine_image=ec2.MachineImage.latest_amazon_linux(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2),
        key_name=key_name,
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
        security_group=internal_sg)
    radius_host.role.add_managed_policy(ssm_policy)

    self.output_props = props.copy()
def createResources(self, ns):
    """Create the Neo4j database EC2 instance and open its security group.

    ``ns`` selects the namespace section of ``self.config`` supplying the
    AMI id, instance type and SSH key name.
    """
    # AMI: generic Linux image pinned per region from config.
    neo4j_4 = ec2.MachineImage.generic_linux(
        {"us-east-1": self.config[ns]['database_ami_id']})

    # User data: DB bootstrap script read from disk.
    with open("aws/scripts/db_init.sh") as init_file:
        init_script = init_file.read()

    # Database instance, tagged with the namespace.
    self.DBInstance = ec2.Instance(
        self, "Database Instance",
        instance_type=ec2.InstanceType(
            self.config[ns]['database_instance_type']),
        machine_image=neo4j_4,
        key_name=self.config[ns]['ssh_key_name'],
        vpc=self.bentoVPC,
        role=self.ecsInstanceRole)
    self.DBInstance.add_user_data(init_script)
    core.Tags.of(self.DBInstance).add("Name", "{}-neo4j".format(ns))

    # Open Neo4j ports to ECS and SSH to the bastion on the instance's SG.
    dbsg = self.DBInstance.connections.security_groups[0]
    dbsg.add_ingress_rule(self.ecssg, ec2.Port.tcp(7474))    # Neo4j HTTP
    dbsg.add_ingress_rule(self.ecssg, ec2.Port.tcp(7687))    # Bolt protocol
    dbsg.add_ingress_rule(self.bastionsg, ec2.Port.tcp(22))  # SSH from bastion
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Single public EC2 host with a custom io1 root volume.

    Relies on module-level ``vpc_id``, ``ec2_type``, ``linux_ami`` and
    ``key_name`` configuration values.
    """
    super().__init__(scope, id, **kwargs)

    vpc = ec2.Vpc.from_lookup(self, "VPC", vpc_id=vpc_id)

    host = ec2.Instance(
        self, "myEC2",
        instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
        instance_name="mySingleHost",
        machine_image=linux_ami,
        vpc=vpc,
        key_name=key_name,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
        # user_data=ec2.UserData.custom(user_data)
    )

    # ec2.Instance has no BlockDeviceMappings property, so override via the
    # lower-layer CloudFormation resource. By default the volume would be
    # gp2 / 8 GiB.
    host.instance.add_property_override("BlockDeviceMappings", [{
        "DeviceName": "/dev/xvda",
        "Ebs": {
            "VolumeSize": "10",
            "VolumeType": "io1",
            "Iops": "150",
            "DeleteOnTermination": "true"
        }
    }])

    host.connections.allow_from_any_ipv4(
        ec2.Port.tcp(22), "Allow ssh from internet")
    # FIX: this rule opens HTTP port 80 but its description wrongly said
    # "Allow ssh from internet".
    host.connections.allow_from_any_ipv4(
        ec2.Port.tcp(80), "Allow http from internet")
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
             sg: ec2.ISecurityGroup, stage={}, **kwargs) -> None:
    """Public bastion host named after the vpc_prefix/stage/customer context."""
    super().__init__(scope, id, **kwargs)

    customer = self.node.try_get_context("customer")
    prefix_name = f'{stage["vpc_prefix"]}-{stage["stage_name"]}-{customer}'

    # Amazon Linux 2, HVM virtualization, general-purpose storage.
    ami = ec2.AmazonLinuxImage(
        edition=ec2.AmazonLinuxEdition.STANDARD,
        generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
        virtualization=ec2.AmazonLinuxVirt.HVM,
        storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

    bastion_host = ec2.Instance(
        self, f'{prefix_name}-public-bastion',
        instance_type=ec2.InstanceType('t3.micro'),
        machine_image=ami,
        vpc=vpc,
        key_name=stage["key_name"],
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
        security_group=sg)

    core.Tags.of(bastion_host).add("Name", f'{prefix_name}-public-bastion')
    core.CfnOutput(self, 'my-bastion-id', value=bastion_host.instance_id)
def __init__(self, scope: core.Construct, id: str, vpc, KeyPairName,
             ec2_type, **kwargs) -> None:
    """Windows bastion in a public subnet with RDP open to the internet."""
    super().__init__(scope, id, **kwargs)

    # Instance role; SecretsManagerReadWrite grants access to the MAD secret.
    self.role = iam.Role(
        self, 'ec2-bastion-role',
        assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'))
    self.role.add_managed_policy(
        policy=iam.ManagedPolicy.from_aws_managed_policy_name(
            'SecretsManagerReadWrite'))

    # Bastion instance (module-level windows_ami / user_data).
    self.bastion = ec2.Instance(
        self, id,
        instance_type=ec2.InstanceType(instance_type_identifier=ec2_type),
        machine_image=windows_ami,
        vpc=vpc,
        user_data=ec2.UserData.custom(user_data),
        key_name=KeyPairName,
        role=self.role,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC))

    # RDP reachable from any IPv4 address.
    self.bastion.connections.allow_from_any_ipv4(
        ec2.Port.tcp(3389), "Internet access RDP")

    core.CfnOutput(self, "Bastion Host",
                   value=self.bastion.instance_public_dns_name)
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """EC2 instance in an existing VPC with port 8080 open to the world.

    Relies on module-level ``vpcID``, ``instanceName``, ``instanceType``,
    ``amiName`` and ``user_data`` values.
    """
    super().__init__(scope, construct_id, **kwargs)

    vpc = ec2.Vpc.from_lookup(self, "vpc", vpc_id=vpcID)

    # Security group allowing inbound 8080 from anywhere.
    sec_group = ec2.SecurityGroup(self, "sec-group-allow-http",
                                  vpc=vpc,
                                  allow_all_outbound=True)
    sec_group.add_ingress_rule(peer=ec2.Peer.ipv4('0.0.0.0/0'),
                               description="Allow HTTP connection",
                               connection=ec2.Port.tcp(8080))

    ec2_instance = ec2.Instance(
        self, "ec2-instance",
        instance_name=instanceName,
        instance_type=ec2.InstanceType(instanceType),
        # FIX: lookup() is a static factory method; the original
        # instantiated the abstract MachineImage class first
        # (`ec2.MachineImage().lookup(...)`).
        machine_image=ec2.MachineImage.lookup(name=amiName),
        vpc=vpc,
        security_group=sec_group,
        user_data=user_data)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Public web server bootstrapped with an httpd install script."""
    super().__init__(scope, id, **kwargs)

    vpc = _ec2.Vpc.from_lookup(self, "importedVPC", vpc_id="vpc-d0a193aa")

    # Bootstrap script becomes the instance user data.
    with open("bootstrap_scripts/install_httpd.sh", mode="r") as fh:
        bootstrap = fh.read()

    server = _ec2.Instance(
        self, "WebServer001Id",
        instance_type=_ec2.InstanceType(instance_type_identifier="t2.micro"),
        instance_name="WebServer001",
        machine_image=_ec2.MachineImage.generic_linux(
            {"us-east-1": "ami-0fc61db8544a617ed"}),
        vpc=vpc,
        vpc_subnets=_ec2.SubnetSelection(subnet_type=_ec2.SubnetType.PUBLIC),
        user_data=_ec2.UserData.custom(bootstrap))

    # Expose the public address as a stack output.
    core.CfnOutput(self, "webServer001Ip",
                   description="WebServer Public Ip Address",
                   value=f"http://{server.instance_public_ip}")

    # HTTP open to the world.
    server.connections.allow_from_any_ipv4(
        _ec2.Port.tcp(80), description="Allow Web Traffic")
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, **kwargs) -> None:
    """Public bastion host with SSH open to the internet."""
    super().__init__(scope, id, **kwargs)

    # Security group exposing SSH to the world; kept on self so other
    # stacks can reference it.
    self.sg = ec2.SecurityGroup(self, 'bastionSG',
                                vpc=vpc,
                                security_group_name='BastionHostSG',
                                description="SG for Public Access",
                                allow_all_outbound=True)
    self.sg.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22),
                             "SSH Access")

    # Bastion instance (module-level linux_ami / key_name).
    bastion = ec2.Instance(
        self, "bastion",
        instance_type=ec2.InstanceType(instance_type_identifier="t2.micro"),
        instance_name="bastion",
        machine_image=linux_ami,
        vpc=vpc,
        key_name=key_name,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
        security_group=self.sg)

    core.CfnOutput(self, 'bastionhost', value=self.sg.security_group_id)
def __init__(self, scope: core.Construct, id: str, vpc, config, **kwargs) -> None:
    """Ubuntu bastion in a private subnet; private IP exported as output.

    NOTE(review): the SSH/ping rules allow any IPv4 source even though the
    host sits in a private subnet — confirm this matches the intent.
    """
    super().__init__(scope, id, **kwargs)

    prj_name = self.node.try_get_context("project_name")
    env_name = self.node.try_get_context("env")
    host_name = config['ec2']['name']
    ssh_key = config['ec2']['ssh_key']

    # Region-pinned Ubuntu AMI.
    ubuntu_ami = ec2.GenericLinuxImage(
        {"ap-southeast-1": "ami-028be27cf930f7a43"})

    # Bastion host in a private subnet; destroyed with the stack.
    self.bastion = ec2.Instance(
        self, 'Instance',
        instance_type=ec2.InstanceType("t3.small"),
        instance_name=f"{host_name}-bastion",
        key_name=f"{ssh_key}",
        machine_image=ubuntu_ami,
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PRIVATE),
    )
    self.bastion.apply_removal_policy(core.RemovalPolicy.DESTROY)

    # SSH and ICMP ping allowed from anywhere.
    self.bastion.connections.allow_from_any_ipv4(
        port_range=ec2.Port.tcp(22),
        description='Allow public SSH connections')
    self.bastion.connections.allow_from_any_ipv4(
        port_range=ec2.Port.icmp_ping(),
        description='Allow public ICMP ping')

    core.CfnOutput(self, f'{host_name}-private-ip',
                   value=self.bastion.instance_private_ip)
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """Launch a t3.micro from an AMI found by description filter in the
    account's default VPC."""
    super().__init__(scope, construct_id, **kwargs)

    # FIX: renamed local from `filter`, which shadowed the builtin.
    # (Removed the stack of commented-out alternative filters; see VCS
    # history if other filter shapes are needed again.)
    ami_filters = {'description': ['*ClouEndureWorkshop-webserver']}
    image = ec2.MachineImage.lookup(name='*webserver', filters=ami_filters)

    default_vpc = ec2.Vpc.from_lookup(self, 'default-vpc', is_default=True)

    ec2_instance = ec2.Instance(
        self, "EC2",
        machine_image=image,
        vpc=default_vpc,
        instance_name='Bastion Host',
        instance_type=ec2.InstanceType(instance_type_identifier='t3.micro'))
def __init__(self, scope: core.Construct, construct_id: str, **kwargs) -> None:
    """Public test server with an extra io1 volume, SSM and S3 read access."""
    super().__init__(scope, construct_id, **kwargs)

    # Existing VPC imported by id.
    vpc = aws_ec2.Vpc.from_lookup(self, "vpc", vpc_id="vpc-1f39b977")

    # User-data bootstrap script read from disk.
    with open("userdata_scripts/setup.sh", mode="r") as fh:
        setup_script = fh.read()

    # Latest Amazon Linux 2 AMI (EBS-backed, HVM).
    aws_linux_ami = aws_ec2.MachineImage.latest_amazon_linux(
        generation=aws_ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
        edition=aws_ec2.AmazonLinuxEdition.STANDARD,
        storage=aws_ec2.AmazonLinuxStorage.EBS,
        virtualization=aws_ec2.AmazonLinuxVirt.HVM)

    server = aws_ec2.Instance(
        self, "ec2id",
        instance_type=aws_ec2.InstanceType(instance_type_identifier="t2.micro"),
        instance_name="TestServer01",
        machine_image=aws_linux_ami,
        vpc=vpc,
        vpc_subnets=aws_ec2.SubnetSelection(
            subnet_type=aws_ec2.SubnetType.PUBLIC),
        key_name="SAA-C01",
        user_data=aws_ec2.UserData.custom(setup_script))

    # Attach a custom io1 data volume through the L1 escape hatch
    # (ec2.Instance exposes no block-device property in this CDK version).
    server.instance.add_property_override(
        "BlockDeviceMappings", [{
            "DeviceName": "/dev/sdb",
            "Ebs": {
                "VolumeSize": "10",
                "VolumeType": "io1",
                "Iops": "100",
                "DeleteOnTermination": "true"
            }
        }])

    # HTTP open to the world.
    server.connections.allow_from_any_ipv4(
        aws_ec2.Port.tcp(80), description="allow web traffic")

    # Instance profile permissions: SSM management plus read-only S3.
    for policy_name in ("AmazonSSMManagedInstanceCore",
                        "AmazonS3ReadOnlyAccess"):
        server.role.add_managed_policy(
            aws_iam.ManagedPolicy.from_aws_managed_policy_name(policy_name))

    core.CfnOutput(self, "serverip01",
                   description="test server ip",
                   value=server.instance_public_ip)
def add_devbox(self):
    """Create a single development node mounted on the data lake's EFS.

    The instance installs Docker and writes an fstab entry for the EFS
    file system — via the EFS mount helper when available, plain NFSv4
    otherwise — then mounts it.

    Raises:
        AssertionError: if the data lake's EFS has no file system id.
    """
    self.devbox = ec2.Instance(
        self, 'DevBox',
        instance_type=ec2.InstanceType('t2.medium'),
        machine_image=ec2.MachineImage.latest_amazon_linux(
            cpu_type=ec2.AmazonLinuxCpuType.X86_64,
            storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE),
        vpc=self.datalake.vpc,
        allow_all_outbound=True)

    # FIX: compare against None with `is None`, not `== None`.
    if self.datalake.efs.file_system_id is None:
        raise AssertionError('No filesystem id present')

    self.devbox.add_user_data(
        "yum check-update -y",
        "yum upgrade -y",
        "yum install -y amazon-efs-utils nfs-utils docker",
        "service docker start",
        "file_system_id_1=" + self.datalake.efs.file_system_id,
        "efs_mount_point_1=/mnt/efs/",
        "mkdir -p \"${efs_mount_point_1}\"",
        # Prefer the EFS mount helper when installed; otherwise fall back
        # to a plain NFSv4 mount of the regional EFS endpoint.
        "test -f \"/sbin/mount.efs\" && echo \"${file_system_id_1}:/ ${efs_mount_point_1} efs defaults,_netdev\" >> /etc/fstab || " +
        "echo \"${file_system_id_1}.efs." + core.Stack.of(self).region + ".amazonaws.com:/ ${efs_mount_point_1} nfs4 nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport,_netdev 0 0\" >> /etc/fstab",
        "mount -a -t efs,nfs4 defaults"
    )

    # Broad development permissions: SSM, S3, CodeCommit, CodeGuru, ECR.
    for policy in [
            'AmazonSSMManagedInstanceCore',
            'AmazonS3FullAccess',
            'AWSCodeCommitFullAccess',
            'AmazonCodeGuruReviewerFullAccess',
            'AmazonEC2ContainerRegistryPowerUser']:
        self.devbox.role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(policy))
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc, **kwargs) -> None:
    """Isolated-subnet test instance manageable only through SSM."""
    super().__init__(scope, id, **kwargs)

    # Latest Amazon Linux 2 (HVM, general-purpose storage).
    al2_image = ec2.MachineImage.latest_amazon_linux(
        generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
        edition=ec2.AmazonLinuxEdition.STANDARD,
        virtualization=ec2.AmazonLinuxVirt.HVM,
        storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

    # Role trusted by EC2 carrying the SSM service policy.
    ssm_role = iam.Role(
        self, "test-instance-SSM-role",
        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))
    ssm_role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            "service-role/AmazonEC2RoleforSSM"))

    # No inbound access (isolated subnet); outbound unrestricted.
    test_instance = ec2.Instance(
        self, "test-instance",
        instance_type=ec2.InstanceType("t3.nano"),
        machine_image=al2_image,
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.ISOLATED),
        role=ssm_role,
        allow_all_outbound=True)

    core.CfnOutput(self, "output-instance-id", value=test_instance.instance_id)
def setup_emqx(self, N, vpc, zone, sg, key):
    """Launch N EMQX instances in private subnets with internal DNS records.

    Instances are collected in ``self.emqx_vms`` and each gets an A record
    ``emqx-<n>.int.emqx`` pointing at its private IP.
    """
    self.emqx_vms = []
    for idx in range(N):
        name = "emqx-%d" % idx
        vm = ec2.Instance(
            self, id=name,
            instance_type=ec2.InstanceType(
                instance_type_identifier=emqx_ins_type),
            machine_image=linux_ami,
            user_data=ec2.UserData.custom(user_data),
            security_group=sg,
            key_name=key,
            vpc=vpc,
            vpc_subnets=ec2.SubnetSelection(
                subnet_type=ec2.SubnetType.PRIVATE),
        )
        self.emqx_vms.append(vm)

        # Internal DNS: emqx-<n>.int.emqx -> private IP.
        r53.ARecord(self, id=name + '.int.emqx',
                    record_name=name + '.int.emqx',
                    zone=zone,
                    target=r53.RecordTarget([vm.instance_private_ip]))

        # Tagging: user-supplied tags first, then the service tag.
        if self.user_defined_tags:
            core.Tags.of(vm).add(*self.user_defined_tags)
        core.Tags.of(vm).add('service', 'emqx')
def __init__(self, scope: core.Construct, id: str, props, **kwargs) -> None:
    """DR-site Windows bastion host; forwards props via ``output_props``."""
    super().__init__(scope, id, **kwargs)

    vpc = props['vpc_dr']
    internal_sg = props['internal_sg_dr']
    bastion_sg = props['bastion_sg_dr']

    # Windows bastion (Japanese Server 2016) in a public subnet.
    win_bastion = ec2.Instance(
        self, 'Bastion',
        instance_type=ec2.InstanceType('t3.large'),
        machine_image=ec2.MachineImage.latest_windows(
            version=ec2.WindowsVersion.WINDOWS_SERVER_2016_JAPANESE_FULL_BASE),
        key_name=self.node.try_get_context('key_name'),
        vpc=vpc,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
        security_group=internal_sg)

    # Manageable through SSM; the second SG adds the bastion-specific rules.
    win_bastion.role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AmazonSSMManagedInstanceCore'))
    win_bastion.add_security_group(bastion_sg)

    self.output_props = props.copy()
def setup_etcd(self, vpc, zone, sg, key):
    """Stand up a 3-node etcd cluster with per-node user data and DNS records.

    Each node writes its cluster configuration to /etc/default/etcd on
    first boot and gets an A record ``etcd<n>.int.emqx``.
    """
    for n in range(0, 3):
        # FIX: the original used a one-element tuple unpack
        # `(cloud_user_data, ) = ec2.UserData.for_linux(),` (flagged
        # "cdk bug?" by the author); a plain assignment is equivalent
        # and far clearer.
        cloud_user_data = ec2.UserData.for_linux()
        cloud_user_data.add_commands(
            'apt update',
            'apt install -y etcd-server etcd-client',
            "echo ETCD_INITIAL_ADVERTISE_PEER_URLS=http://etcd%d.int.emqx:2380 >> /etc/default/etcd" % n,
            'echo ETCD_LISTEN_PEER_URLS=http://0.0.0.0:2380 >> /etc/default/etcd',
            'echo ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379 >> /etc/default/etcd',
            "echo ETCD_ADVERTISE_CLIENT_URLS=http://etcd%d.int.emqx:2379 >> /etc/default/etcd" % n,
            "echo ETCD_NAME=infra%d >> /etc/default/etcd" % n,
            'echo ETCD_INITIAL_CLUSTER_STATE=new >> /etc/default/etcd',
            'echo ETCD_INITIAL_CLUSTER_TOKEN=emqx-cluster-1 >> /etc/default/etcd',
            'echo ETCD_INITIAL_CLUSTER="infra0=http://etcd0.int.emqx:2380,infra1=http://etcd1.int.emqx:2380,infra2=http://etcd2.int.emqx:2380" >> /etc/default/etcd',
            'systemctl restart etcd'
        )
        # NOTE(review): construct id "etsd.%d" looks like a typo for
        # "etcd"; left unchanged because renaming the construct id would
        # replace already-deployed instances.
        ins = ec2.Instance(
            self, id="etsd.%d" % n,
            instance_type=ec2.InstanceType(instance_type_identifier="t3a.nano"),
            machine_image=linux_ami,
            user_data=cloud_user_data,
            security_group=sg,
            key_name=key,
            vpc=vpc
        )
        # Internal DNS: etcd<n>.int.emqx -> private IP.
        r53.ARecord(self, id="etcd%d.int.emqx" % n,
                    record_name="etcd%d.int.emqx" % n,
                    zone=zone,
                    target=r53.RecordTarget([ins.instance_private_ip]))
        if self.user_defined_tags:
            core.Tags.of(ins).add(*self.user_defined_tags)
        core.Tags.of(ins).add('service', 'etcd')
def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc,
             config: dict, region: str, **kwargs):
    """Jenkins EC2 server pinned to the first private subnet of the VPC."""
    super().__init__(scope, id, **kwargs)

    self._region = region

    # Region-pinned AMI from config.
    jenkins_image = ec2.GenericLinuxImage({region: config["ami_id"]})

    # Instance role with the configured managed policies attached.
    self._role = iam.Role(
        self, "InstanceRole",
        assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))
    for policy_name in config["iam_role_policies"]:
        self._role.add_managed_policy(
            iam.ManagedPolicy.from_aws_managed_policy_name(policy_name))

    # Pin the instance to the first private subnet.
    first_private = vpc.select_subnets(
        subnet_type=ec2.SubnetType.PRIVATE).subnets[0]
    subnet_selection = ec2.SubnetSelection(subnets=[first_private])

    self.security_group = ec2.SecurityGroup(self, "EC2SG", vpc=vpc)

    self._instance = ec2.Instance(
        self, "EC2",
        instance_type=ec2.InstanceType(config["instance_type"]),
        machine_image=jenkins_image,
        vpc=vpc,
        vpc_subnets=subnet_selection,
        role=self._role,
        security_group=self.security_group)

    core.CfnOutput(self, "CodeServerInstanceID",
                   value=self._instance.instance_id)
def __init__(self, scope: cdk.Construct, construct_id: str, **kwargs) -> None:
    """t2.micro in the default VPC with SSH open; outputs its public IP."""
    super().__init__(scope, construct_id, **kwargs)

    # FIX: removed the read of ../iac-test-key.pub — the resulting
    # `public_key` was never used (the instance references the existing
    # "app_key-d2f12cf" key pair by name), and the open() could fail for
    # no benefit.

    vpc = ec2.Vpc.from_lookup(self, "default_vpc", is_default=True)

    # SSH open to any IPv4 source.
    security_group = ec2.SecurityGroup(
        self, "test_sg",
        vpc=vpc,
        allow_all_outbound=True,
    )
    security_group.add_ingress_rule(ec2.Peer.any_ipv4(), ec2.Port.tcp(22))

    instance = ec2.Instance(
        self, "test_instance",
        machine_image=ec2.AmazonLinuxImage(),
        instance_type=ec2.InstanceType("t2.micro"),
        key_name="app_key-d2f12cf",
        vpc=vpc,
        security_group=security_group,
    )

    public_ip = cdk.CfnOutput(self, "public_ip",
                              value=instance.instance_public_ip)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Single-AZ VPC with one public, SSH-able Amazon Linux instance."""
    super().__init__(scope, id, **kwargs)

    vpc = ec2.Vpc(self, "MyVpc", max_azs=1)

    # SSH-only ingress from any IPv4 source.
    ssh_sg = ec2.SecurityGroup(
        self, "SG",
        description='Allow ssh access to ec2 instances',
        vpc=vpc)
    ssh_sg.add_ingress_rule(
        peer=ec2.Peer.any_ipv4(),
        connection=ec2.Port.tcp(22))

    ec2.Instance(
        self, "EC2INSTANCE",
        vpc=vpc,
        instance_type=ec2.InstanceType.of(
            ec2.InstanceClass.BURSTABLE2, ec2.InstanceSize.MICRO),
        machine_image=ec2.AmazonLinuxImage(),
        vpc_subnets={'subnet_type': ec2.SubnetType.PUBLIC},
        security_group=ssh_sg,
        key_name="MyNVKeyPair")
def __init__(self, scope: core.Construct, id: str, nw_stack: core.Stack, **kwargs) -> None:
    """Isolated test instance plus a public bastion reachable from SSH_IP."""
    super().__init__(scope, id, **kwargs)

    app_vpc = nw_stack.app_vpc
    instance_sg = _ec2.SecurityGroup(self, id='test-ec2-instance-sg',
                                     vpc=app_vpc)
    jump_sg = _ec2.SecurityGroup(self, id='bastion-sg', vpc=app_vpc)

    # Private workload instance in an isolated subnet.
    _ec2.Instance(self, id='tgw_poc_instance',
                  instance_type=_ec2.InstanceType('t3a.nano'),
                  machine_image=_ec2.AmazonLinuxImage(),
                  key_name=EC2Stack.KEY_PAIR,
                  security_group=instance_sg,
                  instance_name='tgw_nat_test_instance',
                  vpc=app_vpc,
                  vpc_subnets=_ec2.SubnetSelection(
                      subnet_type=_ec2.SubnetType.ISOLATED))

    # Public bastion used to hop into the isolated instance.
    _ec2.Instance(self, id='tgw_poc_bastion',
                  instance_type=_ec2.InstanceType('t3a.nano'),
                  machine_image=_ec2.AmazonLinuxImage(),
                  key_name=EC2Stack.KEY_PAIR,
                  security_group=jump_sg,
                  instance_name='tgw_test_bastion',
                  vpc=app_vpc,
                  vpc_subnets=_ec2.SubnetSelection(
                      subnet_type=_ec2.SubnetType.PUBLIC))

    # TCP/22, built once and reused for both rules.
    ssh_port = _ec2.Port(protocol=_ec2.Protocol.TCP,
                         string_representation="tcp_22",
                         from_port=EC2Stack.SSH_PORT,
                         to_port=EC2Stack.SSH_PORT)

    # Operator IP -> bastion, then bastion -> workload instance.
    jump_sg.add_ingress_rule(peer=_ec2.Peer.ipv4(EC2Stack.SSH_IP),
                             connection=ssh_port,
                             description='Allow SSH access from SSH_IP')
    instance_sg.add_ingress_rule(
        peer=jump_sg,
        connection=ssh_port,
        description='Allow SSH access from bastion host')
def __init__(self, scope: core.Construct, id: str, vpc, **kwargs) -> None:
    """Public HTTP server (m5.large, Amazon Linux 2) manageable via SSM."""
    super().__init__(scope, id, **kwargs)

    instance_type = ec2.InstanceType('m5.large')
    machine_image = ec2.AmazonLinuxImage(
        edition=ec2.AmazonLinuxEdition.STANDARD,
        generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2)
    key_name = 'kyn-key'
    allow_all_outbound = True

    # Install and start httpd on first boot.
    user_data = ec2.UserData.for_linux()
    user_data.add_commands('sudo yum -y update',
                           'sudo yum install -y httpd',
                           'sudo systemctl start httpd')

    # Security group: HTTP from anywhere, all traffic from within the VPC.
    cidr = vpc.vpc_cidr_block
    self.http_server_sg = ec2.SecurityGroup(
        self, 'http-server-sg',
        vpc=vpc,
        allow_all_outbound=True,
        security_group_name='http-server-sg')
    self.http_server_sg.add_ingress_rule(
        peer=ec2.Peer.ipv4('0.0.0.0/0'),
        connection=ec2.Port.tcp(80),
        description='Allow Inbound http Connection')
    self.http_server_sg.add_ingress_rule(
        peer=ec2.Peer.ipv4(cidr),
        connection=ec2.Port.all_traffic(),
        description='Allow Inbound Connection from VPC')
    security_group = self.http_server_sg

    # Instance role with SSM core permissions.
    managed_policies = [
        iam.ManagedPolicy.from_aws_managed_policy_name(
            'AmazonSSMManagedInstanceCore')
    ]
    # FIX: the trust policy used ServicePrincipal('ec2'); the EC2 service
    # principal is 'ec2.amazonaws.com' (as used everywhere else in this
    # codebase), so the role could not be assumed by EC2.
    role = iam.Role(self, 'http-server-role',
                    assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'),
                    managed_policies=managed_policies,
                    role_name=None)

    # HTTP server instance in a public subnet.
    self.http_server = ec2.Instance(
        self, 'ec2',
        instance_type=instance_type,
        machine_image=machine_image,
        vpc=vpc,
        allow_all_outbound=allow_all_outbound,
        key_name=key_name,
        security_group=security_group,
        user_data=user_data,
        role=role,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC))
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None: super().__init__(scope, id, **kwargs) # The code that defines your stack goes here # Just do it ugly straight line for now vpc = ec2.Vpc(self, "Github-Selfhost-vpc", nat_gateways=0, enable_dns_hostnames=True, enable_dns_support=True, subnet_configuration=[ ec2.SubnetConfiguration( name="selfhost_public", subnet_type=ec2.SubnetType.PUBLIC) ]) # AMI # al2 = getLatestAL2Ami() # centos = getLatestCentosAmi() ubuntu = getLatestUbuntuAmi() instances = [] # Instance creation. # Stands up an instance, then installs the github runner on the first boot. # This can be made into a loop. TODO user_data_focal = ec2.UserData.for_linux() user_data_focal.add_commands( "apt-get update -y", "apt-get upgrade -y", "apt-get install -y curl software-properties-common", "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -", "add-apt-repository " "'deb [arch=arm64] https://download.docker.com/linux/ubuntu focal stable'", "apt-get update -y", "apt-get install -y docker-ce docker-ce-cli containerd.io", "systemctl start docker") instance_focal1 = ec2.Instance( self, "focal1-tester", instance_type=ec2.InstanceType("m6g.large"), machine_image=ubuntu, vpc=vpc, key_name=KEY_NAME, block_devices=[ ec2.BlockDevice(device_name='/dev/sda1', volume=ec2.BlockDeviceVolume( ec2.EbsDeviceProps(volume_size=128))) ], user_data=user_data_focal) instances.append(instance_focal1) # Allow inbound HTTPS connections for instance in instances: instance.connections.allow_from_any_ipv4( ec2.Port.tcp(443), 'Allow inbound HTTPS connections') if AWS_PREFIX_LIST: instance.connections.allow_from( ec2.Peer.prefix_list(AWS_PREFIX_LIST), ec2.Port.tcp(22), 'Allow inbound SSH connections from trusted sources')
def create_ec2(self):
    """Return a new EC2 instance using the stack's default type and image."""
    return ec2.Instance(
        self, guid('EC2-'),
        vpc=self.vpc,
        instance_type=self.DEFAULT_EC2_TYPE,
        machine_image=self.DEFAULT_IMAGE)
def __init__(self, scope: core.Construct, id: str, vpc: ec2.Vpc,
             project_name: str, instance_config: dict, ssh_config: dict,
             build_files: s3.Asset, iam_role: iam.Role = None,
             **kwargs) -> None:
    """EC2 instance that downloads build files from S3 and runs build.sh.

    ``instance_config`` supplies 'instance_type', 'key_name' and 'ami';
    ``ssh_config`` (may be None) supplies the 'cidr' allowed to SSH in.
    """
    super().__init__(scope, id, **kwargs)

    # IAM role: reuse the caller's role when given, else a plain EC2 role.
    if iam_role is None:
        self._role = iam.Role(
            self, id='role',
            assumed_by=iam.ServicePrincipal('ec2.amazonaws.com'))
    else:
        self._role = iam_role

    # Security group, optionally opened for SSH from the configured CIDR.
    self._security_group = ec2.SecurityGroup(
        self, id='security_group',
        vpc=vpc,
        security_group_name=project_name)
    if ssh_config is not None:
        self._security_group.add_ingress_rule(
            peer=ec2.Peer.ipv4(ssh_config['cidr']),
            connection=ec2.Port.tcp(22),
            description='Allow SSH traffic')

    subnet = ec2.SubnetType.PUBLIC

    # Instance
    self._instance = ec2.Instance(
        self, id='instance',
        instance_type=instance_config['instance_type'],
        vpc=vpc,
        key_name=instance_config['key_name'],
        role=self._role,
        machine_image=instance_config['ami'],
        security_group=self._security_group,
        vpc_subnets=ec2.SubnetSelection(subnet_type=subnet))

    # User data: fetch the build bundle from S3, unpack it, then run it.
    self._instance.add_user_data(
        'aws s3 cp s3://{}/{} .'.format(build_files.s3_bucket_name,
                                        build_files.s3_object_key),
        'unzip *.zip -d /tmp',
        'chmod +x /tmp/build.sh; /tmp/build.sh {}'.format(project_name))

    # FIX: core.Tag.add() is deprecated; use core.Tags.of(...).add(...),
    # matching how the rest of this codebase applies tags.
    core.Tags.of(self._instance).add('Project', project_name)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Benchmark host: fresh VPC, SSH open, user data that self-halts."""
    super().__init__(scope, id, **kwargs)

    # NOTE: this creates a brand-new VPC (the original "lookup existing
    # VPC" comment was stale).
    vpc = ec2.Vpc(
        self, "yardstiqVpc",
    )

    # SSH open to any IPv4 source.
    ssh_sg = ec2.SecurityGroup(
        self, "sec-group-allow-ssh",
        vpc=vpc,
        allow_all_outbound=True,
    )
    ssh_sg.add_ingress_rule(
        peer=ec2.Peer.any_ipv4(),
        description="Allow SSH connection",
        connection=ec2.Port.tcp(22))

    # Startup commands: log a marker, then schedule a halt in one minute.
    # (An earlier commented-out variant cloned and ran the
    # quantum-benchmarks suite instead.)
    startup = ec2.UserData.for_linux()
    command = """
    echo "Hello World" >> /home/ubuntu/yardstiq.log
    echo "sudo halt" | at now + 1 minutes
    """
    startup.add_commands(command)

    # Benchmark host in a public subnet; replaced whenever user data changes.
    ec2.Instance(
        self, "ec2-instance",
        key_name="yardstiqPem",
        instance_name=instanceName,
        instance_type=ec2.InstanceType(instanceType),
        machine_image=ec2.GenericLinuxImage(
            {'us-west-1': 'ami-031b673f443c2172c'}),
        vpc=vpc,
        security_group=ssh_sg,
        vpc_subnets=ec2.SubnetSelection(subnet_type=ec2.SubnetType.PUBLIC),
        user_data=startup,
        user_data_causes_replacement=True)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Public web instance that downloads and runs configure.sh from S3."""
    super().__init__(scope, id, **kwargs)

    # Public-subnet-only VPC, no NAT gateways.
    vpc = ec2.Vpc(
        self, "VPC",
        nat_gateways=0,
        subnet_configuration=[
            ec2.SubnetConfiguration(name="public",
                                    subnet_type=ec2.SubnetType.PUBLIC)
        ])

    # Latest Amazon Linux 2 AMI.
    amzn_linux = ec2.MachineImage.latest_amazon_linux(
        generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
        edition=ec2.AmazonLinuxEdition.STANDARD,
        virtualization=ec2.AmazonLinuxVirt.HVM,
        storage=ec2.AmazonLinuxStorage.GENERAL_PURPOSE)

    # Instance role carrying the SSM service policy.
    role = iam.Role(self, "InstanceSSM",
                    assumed_by=iam.ServicePrincipal("ec2.amazonaws.com"))
    role.add_managed_policy(
        iam.ManagedPolicy.from_aws_managed_policy_name(
            "service-role/AmazonEC2RoleforSSM"))

    # HTTP open to the world.
    security_group = ec2.SecurityGroup(self, "SecurityGroup",
                                       vpc=vpc,
                                       allow_all_outbound=True)
    security_group.add_ingress_rule(peer=ec2.Peer().ipv4("0.0.0.0/0"),
                                    connection=ec2.Port.tcp(80))

    instance = ec2.Instance(self, "Instance",
                            instance_type=ec2.InstanceType("t3a.micro"),
                            machine_image=amzn_linux,
                            vpc=vpc,
                            role=role,
                            security_group=security_group)

    # Ship configure.sh to S3 as an asset, then have user data download
    # and execute it on first boot.
    asset = Asset(self, "Asset", path=os.path.join(dirname, "configure.sh"))
    local_path = instance.user_data.add_s3_download_command(
        bucket=asset.bucket, bucket_key=asset.s3_object_key)
    instance.user_data.add_execute_file_command(file_path=local_path)
    asset.grant_read(instance.role)
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
    """Stack launching one EC2 instance into a VPC looked up via SSM.

    NOTE(review): this block looks unfinished — ``my_security_group`` and
    ``my_session_mgmt_role`` are referenced below but are not defined
    anywhere visible in this file, and several locals (prefix, env_name,
    volumne_size, ec2_type, image_id) are computed but never used.
    Confirm against the full module before relying on it.
    """
    super().__init__(scope, id, **kwargs)

    # Context values; currently unused below (see NOTE above).
    # "volumne_size" is presumably a typo for "volume_size" — the same
    # spelling must exist in cdk.json for the lookup to return anything.
    prefix = self.node.try_get_context("project_name")
    env_name = self.node.try_get_context("env")
    volumne_size = self.node.try_get_context("volumne_size")
    ec2_type = self.node.try_get_context("ec2_type")

    # read parameters from SSM
    vpcid = ssm.StringParameter.value_from_lookup(self, "/cdk/ec2/vpc_id")

    # Get the existing VPC
    my_vpc = ec2.Vpc.from_lookup(self, "VPC", vpc_id=vpcid)

    # NOTE(review): a large commented-out sketch of a CfnSecurityGroup
    # plus ingress/egress rules originally sat here; trimmed for
    # readability — recover it from version control if needed.

    # Resolves the latest Amazon Linux 2 AMI id; currently unused.
    image_id = ec2.AmazonLinuxImage(
        generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2).get_image(
            self).image_id

    # Create an EC2 instance with the above configuration
    ec2_instance = ec2.Instance(
        self, "my_ec2_instance",
        # instance_type / machine_image / key_name / user_data were all
        # commented out in the original and remain to be wired up.
        vpc=my_vpc,
        instance_name="MyInstance",
        security_group=my_security_group,  # NOTE(review): undefined in view
        role=my_session_mgmt_role,         # NOTE(review): undefined in view
    )
def __init__(self, scope: core.Construct, id: str, *, cidr_range: str,
             transit_gateway: ec2.CfnTransitGateway, role: iam.IRole,
             **kwargs):
    """VPC with one instance, attached to a transit gateway for 10/8 traffic."""
    super().__init__(scope, id, **kwargs)

    vpc = ec2.Vpc(
        self, 'Vpc',
        cidr=cidr_range,
        max_azs=2,
        nat_gateways=1,
    )

    # HTTP reachable from the 10.0.0.0/8 range only.
    sg = ec2.SecurityGroup(
        self, 'InstanceSecurityGroup',
        vpc=vpc,
    )
    sg.add_ingress_rule(ec2.Peer.ipv4('10.0.0.0/8'), ec2.Port.tcp(80))

    # Module-level raw_user_data supplies the boot commands.
    user_data = ec2.UserData.for_linux()
    user_data.add_commands(raw_user_data)

    ec2.Instance(
        self, 'Instance',
        role=role,
        vpc=vpc,
        security_group=sg,
        user_data=user_data,
        instance_type=ec2.InstanceType.of(
            instance_class=ec2.InstanceClass.BURSTABLE3_AMD,
            instance_size=ec2.InstanceSize.NANO,
        ),
        machine_image=ec2.AmazonLinuxImage(
            generation=ec2.AmazonLinuxGeneration.AMAZON_LINUX_2,
        ),
    )

    # TODO: replace by CDK construct when available
    attachment = ec2.CfnTransitGatewayAttachment(
        self, 'TransitGatewayAttachment',
        transit_gateway_id=transit_gateway.ref,
        vpc_id=vpc.vpc_id,
        subnet_ids=[s.subnet_id for s in vpc.private_subnets],
    )

    # Route 10/8 via the transit gateway from every private subnet.
    for idx, subnet in enumerate(vpc.private_subnets):
        # TODO: replace by CDK construct when available
        tgw_route = ec2.CfnRoute(
            self, 'TransitGatewayRoute{}'.format(idx),
            route_table_id=subnet.route_table.route_table_id,
            transit_gateway_id=transit_gateway.ref,
            destination_cidr_block='10.0.0.0/8')
        # The route is only valid once the attachment exists.
        tgw_route.node.add_dependency(attachment)
def __init__(self, scope: core.Construct, id: str, ** kwargs) -> None:
    """Custom public VPC plus a bootstrapped web server with SSM/S3 access."""
    super().__init__(scope, id, **kwargs)

    # Two-AZ, public-subnet-only VPC with no NAT gateways.
    vpc = _ec2.Vpc(
        self, "customVpcId",
        cidr="10.0.0.0/24",
        max_azs=2,
        nat_gateways=0,
        subnet_configuration=[
            _ec2.SubnetConfiguration(name="public",
                                     subnet_type=_ec2.SubnetType.PUBLIC)
        ])

    # httpd bootstrap script becomes the instance user data.
    with open("bootstrap_scripts/install_httpd.sh", mode="r") as fh:
        bootstrap = fh.read()

    server = _ec2.Instance(
        self, "WebServer002Id",
        instance_type=_ec2.InstanceType(instance_type_identifier="t2.micro"),
        instance_name="WebServer002",
        machine_image=_ec2.MachineImage.generic_linux(
            {"us-east-1": "ami-0fc61db8544a617ed"}),
        vpc=vpc,
        vpc_subnets=_ec2.SubnetSelection(subnet_type=_ec2.SubnetType.PUBLIC),
        user_data=_ec2.UserData.custom(bootstrap))

    # Expose the public address as a stack output.
    core.CfnOutput(self, "webServer002Ip",
                   description="WebServer Public Ip Address",
                   value=f"http://{server.instance_public_ip}")

    # HTTP open to the world.
    server.connections.allow_from_any_ipv4(
        _ec2.Port.tcp(80), description="Allow Web Traffic")

    # Instance profile: SSM management plus read-only S3.
    for policy_name in ("AmazonSSMManagedInstanceCore",
                        "AmazonS3ReadOnlyAccess"):
        server.role.add_managed_policy(
            _iam.ManagedPolicy.from_aws_managed_policy_name(policy_name))