Example #1
 def _provision_router(self, protect=False):
     public_router = networking.Router(
         self.props.public_router_name,
         name=self.props.public_router_name,
         external_network_id=self.props.external_network["id"],
         opts=ResourceOptions(delete_before_replace=True, protect=protect),
     )
     networking.RouterInterface(
         "router-interface-management",
         router_id=public_router.id,
         subnet_id=self.resources.mgmt_subnet.id,
         opts=ResourceOptions(
             delete_before_replace=True,
             protect=protect,
         ),
     )
     networking.RouterInterface(
         "router-interface-deployment",
         router_id=public_router.id,
         subnet_id=self.resources.deploy_subnet.id,
         opts=ResourceOptions(
             delete_before_replace=True,
             protect=protect,
         ),
     )
     pulumi.export(
         "PublicRouter",
         Output.all(public_router.name, public_router.id).apply(
             lambda args: f"{args[0]} ({args[1]})"
         ),
     )
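The export above combines two Outputs into a single string. A minimal, self-contained sketch of the same Output.all(...).apply(...) pattern, using pulumi_random purely for illustration:

import pulumi
from pulumi import Output
import pulumi_random as random

# RandomPet stands in for any resource; the pattern only needs a couple of Outputs.
pet = random.RandomPet("export-demo", prefix="demo")

# Output.all gathers the Outputs; .apply runs once all of them have resolved.
pulumi.export(
    "ExportDemo",
    Output.all(pet.prefix, pet.id).apply(lambda args: f"{args[0]} ({args[1]})"),
)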
Example #2
 def construct(self, name: str, typ: str, urn: str):
     if typ == "test:index:resource":
         return MyCustomResource(name, typ, ResourceOptions(urn=urn))
     elif typ == "test:index:component":
         return MyComponentResource(name, typ, ResourceOptions(urn=urn))
     else:
         raise Exception(f"unknown resource type {typ}")
Example #3
    def _create_from_arguments(self, name, number_of_availability_zones):
        # type: (str, int) -> None
        """
        Creates a new Network from the constructor arguments (i.e. not from a
        VPC that already exists).
        :param name: The name of the new network
        :param number_of_availability_zones: The number of AZs to create subnets in
        :return: None
        """
        number_of_availability_zones = number_of_availability_zones or 2
        if number_of_availability_zones < 1 or number_of_availability_zones >= 4:
            raise RunError(
                "Unsupported number of available zones for Network: " +
                str(number_of_availability_zones))

        self.use_private_subnets = self.use_private_subnets or False
        vpc = ec2.Vpc(name,
                      cidr_block="10.10.0.0/16",
                      enable_dns_hostnames=True,
                      enable_dns_support=True,
                      tags={
                          "Name": name,
                      },
                      __opts__=ResourceOptions(parent=self))

        self.vpc_id = vpc.id
        self.security_group_ids = [vpc.default_security_group_id]

        internet_gateway = ec2.InternetGateway(
            name,
            vpc_id=vpc.id,
            tags={
                "Name": name,
            },
            __opts__=ResourceOptions(parent=self))

        public_route_table = ec2.RouteTable(
            name,
            vpc_id=vpc.id,
            routes=[{
                "cidrBlock": "0.0.0.0/0",
                "gatewayId": internet_gateway.id
            }],
            tags={"Name": name},
            __opts__=ResourceOptions(parent=self))

        self.subnet_ids = []
        self.public_subnet_ids = []
        for i in range(number_of_availability_zones):
            route_table, subnet = self._create_subnet(name, public_route_table,
                                                      i)

            # pylint: disable=unused-variable
            route_table_association = ec2.RouteTableAssociation(
                "%s-%d" % (name, i),
                subnet_id=subnet.id,
                route_table_id=route_table.id,
                __opts__=ResourceOptions(parent=self))

            self.subnet_ids.append(subnet.id)
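The snippet above passes options through the legacy `__opts__` keyword; current Pulumi SDKs accept the same object via `opts=`. A trimmed sketch of the same parent/child wiring in the newer style (the component type token here is illustrative):

import pulumi
from pulumi import ResourceOptions
import pulumi_aws as aws


class Network(pulumi.ComponentResource):
    def __init__(self, name: str, opts: ResourceOptions = None):
        super().__init__("custom:network:Network", name, None, opts)
        # parent=self nests the VPC under the component in the stack view.
        vpc = aws.ec2.Vpc(
            name,
            cidr_block="10.10.0.0/16",
            enable_dns_hostnames=True,
            enable_dns_support=True,
            tags={"Name": name},
            opts=ResourceOptions(parent=self),
        )
        self.vpc_id = vpc.id
        self.register_outputs({"vpc_id": vpc.id})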
Example #4
 def _provision_dns_record(self, dns_name, ipaddr):
     dns_zone = dns.get_dns_zone(name=self.props.dns_zone_name)
     reverse_dns_zone = dns.get_dns_zone(
         name=self.props.reverse_dns_zone_name,
         # opts=InvokeOptions(provider=self.provider_ccadmin_master),
     )
     dns_name = dns_name + "." + self.props.dns_zone_name
     r = dns.RecordSet(
         dns_name,
         name=dns_name,
         records=[ipaddr],
         type="A",
         ttl=1800,
         zone_id=dns_zone.id,
         opts=ResourceOptions(delete_before_replace=True),
     )
     dns.RecordSet(
         "reverse-" + dns_name,
         name=ipaddr.split(".")[-1] + "." + self.props.reverse_dns_zone_name,
         records=[dns_name],
         type="PTR",
         ttl=1800,
         zone_id=reverse_dns_zone.id,
         opts=ResourceOptions(
             # provider=self.provider_ccadmin_master,
             delete_before_replace=True,
             depends_on=[r],
         ),
     )
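The commented-out InvokeOptions hints at how a non-default provider would be selected for the zone lookup. A hedged sketch, assuming pulumi_openstack is available and treating the provider and zone names as placeholders:

from pulumi import InvokeOptions
import pulumi_openstack as openstack

# Hypothetical explicit provider; data-source calls take InvokeOptions,
# while resources take ResourceOptions.
ccadmin_master = openstack.Provider("ccadmin-master")
reverse_zone = openstack.dns.get_dns_zone(
    name="0.168.192.in-addr.arpa.",
    opts=InvokeOptions(provider=ccadmin_master),
)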
Example #5
    def __init__(self, name, instance_type="t2.micro", node_count=0, vpc_id=None, key_name=None, subnet_ids=None,
                 version=None, bastion_sg_id=None, asg_tags=None):
        ComponentResource.__init__(self, "aws:compute:eks", name, None, None)
        self.vpc_id = vpc_id

        self._create_compute_iam_roles(name)
        self._create_sgs(bastion_sg_id)

        vpc_config = {
            "security_group_ids": [self.master_sg],
            "subnet_ids": subnet_ids
        }

        eks_tags = {
            "Name": name
        }
        cluster = eks.Cluster(name, name=name, role_arn=self.eks_master_role, tags=eks_tags, vpc_config=vpc_config,
                             __opts__=ResourceOptions(parent=self, depends_on=self.cluster_role_attachment_dependencies))

        eks_ami = _get_eks_ami(version)

        user_data = self._build_asg_userdata(cluster, name)
        node_launch_config = ec2.LaunchConfiguration("%s-launch-config" % name, image_id=eks_ami, instance_type=instance_type,
                                                     iam_instance_profile=self.eks_worker_instance_profile, key_name=key_name,
                                                     name=name, security_groups=[self.worker_sg],
                                                     user_data=user_data,
                                                     __opts__=ResourceOptions(parent=self))
        asg_tags = {
            "key": "kubernetes.io/cluster/%s" % name,
            "value": "owned",
            "propagateAtLaunch": True
        }
        node_asg = autoscaling.Group("%s-asg" % name, launch_configuration=node_launch_config.id, max_size=node_count,
                                     min_size=node_count, desired_capacity=node_count, vpc_zone_identifiers=subnet_ids,
                                     tags=[asg_tags], __opts__=ResourceOptions(parent=self, depends_on=[cluster]))

        # # TODO: create configmap to join the nodes to cluster
        # configmap_data = {
        #     "mapRoles" : [{
        #         "rolearn":self.eks_worker_role.arn,
        #         "username":"******",
        #         "groups":[
        #             "system:bootstrappers",
        #             "system:nodes"
        #         ]
        #     }]
        # }
        # configmap_metadata = {
        #     "name": "aws-auth",
        #     "namespace": "kube-system"
        # }
        #
        # k8s_provider = Provider("dtd-cluster", kubeconfig=cluster.certificate_authority)
        # join_nodes = ConfigMap("join-nodes-cm", data=configmap_data, metadata=configmap_metadata,
        #                        __opts__=ResourceOptions(parent=self, provider=k8s_provider))

        self.cluster_ca = cluster.certificate_authority['data']
Example #6
    def _provision_helper_vm(self):
        init_script = r"""#!/bin/bash
echo 'net.ipv4.conf.default.rp_filter = 2' >> /etc/sysctl.conf
echo 'net.ipv4.conf.all.rp_filter = 2' >> /etc/sysctl.conf
/usr/sbin/sysctl -p /etc/sysctl.conf
"""
        sg = compute.SecGroup(
            "helper-vm-sg",
            description="allow ssh",
            rules=[
                compute.SecGroupRuleArgs(
                    cidr="0.0.0.0/0", from_port=22, to_port=22, ip_protocol="tcp"
                )
            ],
        )
        external_port = networking.Port(
            "helper-vm-external-port",
            network_id=self.resources.mgmt_network.id,
            fixed_ips=[
                networking.PortFixedIpArgs(
                    subnet_id=self.resources.mgmt_subnet.id,
                    ip_address=self.props.helper_vm["ip"],
                )
            ],
            security_group_ids=[sg.id],
        )
        helper_vm = compute.Instance(
            "helper-vm",
            name="helper-vm",
            flavor_name=self.props.helper_vm["flavor_name"],
            image_name=self.props.helper_vm["image_name"],
            networks=[
                compute.InstanceNetworkArgs(name=self.resources.deploy_network.name),
            ],
            key_pair=self.resources.keypair.name,
            user_data=init_script,
            opts=ResourceOptions(
                delete_before_replace=True,
                ignore_changes=["image_name", "key_pair"],
            ),
        )
        attach_external_ip = compute.InterfaceAttach(
            "helper-vm-attatch",
            instance_id=helper_vm.id,
            port_id=external_port.id,
            opts=ResourceOptions(delete_before_replace=True, depends_on=[helper_vm]),
        )
        pulumi.export(
            "HelperVM",
            Output.all(
                helper_vm.name, helper_vm.id, external_port.all_fixed_ips[0]
            ).apply(lambda args: f"{args[0]} ({args[1]}, {args[2]})"),
        )
        return helper_vm, attach_external_ip
Example #7
    def _provision_network(self, protect=False):
        deploy_network = networking.Network(
            self.props.deploy_network["name"],
            name=self.props.deploy_network["name"],
            opts=ResourceOptions(delete_before_replace=True, protect=protect),
        )
        deploy_subnet = networking.Subnet(
            self.props.deploy_network["subnet_name"],
            name=self.props.deploy_network["subnet_name"],
            network_id=deploy_network.id,
            cidr=self.props.deploy_network["cidr"],
            ip_version=4,
            opts=ResourceOptions(delete_before_replace=True, protect=protect),
        )
        public_router = networking.Router(
            self.props.public_router_name,
            name=self.props.public_router_name,
            external_network_id=self.props.external_network["id"],
            opts=ResourceOptions(delete_before_replace=True, protect=protect),
        )
        networking.RouterInterface(
            "router-interface-management",
            router_id=public_router.id,
            subnet_id=self.resources.mgmt_subnet.id,
            opts=ResourceOptions(
                # provider=self.provider_cloud_admin,
                delete_before_replace=True,
                protect=protect,
            ),
        )
        networking.RouterInterface(
            "router-interface-deployment",
            router_id=public_router.id,
            subnet_id=deploy_subnet.id,
            opts=ResourceOptions(
                # provider=self.provider_cloud_admin,
                delete_before_replace=True,
                protect=protect,
            ),
        )

        pulumi.export(
            "DeploymentNetwork",
            Output.all(deploy_network.name, deploy_network.id).apply(
                lambda args: f"{args[0]} ({args[1]})"
            ),
        )
        pulumi.export(
            "PublicRouter",
            Output.all(public_router.name, public_router.id).apply(
                lambda args: f"{args[0]} ({args[1]})"
            ),
        )
Example #8
    def __init__(
        self,
        name,
        vpc_environment: VPC,
        opts=None,
    ):
        super().__init__("nuage:aws:DevelopmentEnvironment:EFS",
                         f"{name}EfsEnvironment", None, opts)

        file_system = efs.FileSystem(f"{name}FileSystem")
        targets = []

        for i in range(0, len(vpc_environment.public_subnets)):
            targets.append(
                efs.MountTarget(
                    f"{name}MountTarget{i}",
                    file_system_id=file_system.id,
                    subnet_id=vpc_environment.public_subnets[i].id,
                    security_groups=[vpc_environment.security_group],
                    opts=ResourceOptions(depends_on=[
                        vpc_environment.security_group,
                        vpc_environment.public_subnets[i],
                    ]),
                ))

        access_point = efs.AccessPoint(
            f"{name}AccessPoint",
            file_system_id=file_system.id,
            posix_user={
                "uid": 1000,
                "gid": 1000
            },
            root_directory={
                "path": "/",
                "creationInfo": {
                    "ownerGid": 1000,
                    "ownerUid": 1000,
                    "permissions": "755",
                },
            },
            opts=ResourceOptions(depends_on=targets),
        )

        outputs = {
            "file_system_id": file_system.id,
            "access_point": access_point
        }

        self.set_outputs(outputs)
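The constructor forwards the caller's opts straight to super().__init__, while the children build their own ResourceOptions. When a component needs to combine caller-supplied options with its own defaults, ResourceOptions.merge can be used; a minimal sketch:

from pulumi import ResourceOptions

# merge() combines two option bags, with fields from the second argument
# taking precedence where both are set.
defaults = ResourceOptions(delete_before_replace=True)
caller_opts = ResourceOptions(protect=True)
combined = ResourceOptions.merge(defaults, caller_opts)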
Example #9
    def __init__(self,
                 name,
                 size="t2.micro",
                 security_groups=None,
                 tags=None,
                 subnet_id=None,
                 key=None,
                 user_data_dict=None):
        ComponentResource.__init__(self, "aws:compute:server", name, None,
                                   None)

        type = tags['type']

        self.user_data = self.get_user_data(user_data_dict)
        self.ami_id = _get_ami()
        self.size = size
        self.name = name
        self.security_groups = security_groups
        self.subnet_id = subnet_id

        server = aws.ec2.Instance(
            self.name,
            instance_type=self.size,
            security_groups=self.security_groups,
            tags=tags,
            ami=self.ami_id,
            user_data=self.user_data,
            key_name=key,
            associate_public_ip_address=_get_public_ip(type),
            subnet_id=self.subnet_id,
            __opts__=ResourceOptions(parent=self))

        self.public_dns = server.public_dns
        self.private_ip = server.private_ip
Example #10
def configure_dns(domain: str, zone_id: pulumi.Input):
    # The SSL cert must be created in us-east-1, regardless of where the API is deployed.
    aws_us_east_1 = aws.Provider("aws-provider-us-east-1", region="us-east-1")
    # Request ACM certificate
    ssl_cert = aws.acm.Certificate(
        "ssl-cert",
        domain_name=domain,
        validation_method="DNS",
        opts=ResourceOptions(provider=aws_us_east_1))
    # Create DNS record to prove to ACM that we own the domain
    ssl_cert_validation_dns_record = aws.route53.Record(
        "ssl-cert-validation-dns-record",
        zone_id=zone_id,
        name=ssl_cert.domain_validation_options.apply(
            lambda options: options[0].resource_record_name),
        type=ssl_cert.domain_validation_options.apply(
            lambda options: options[0].resource_record_type),
        records=[
            ssl_cert.domain_validation_options.apply(
                lambda options: options[0].resource_record_value)
        ],
        ttl=10 * 60)
    # Wait for the certificate validation to succeed
    validated_ssl_certificate = aws.acm.CertificateValidation(
        "ssl-cert-validation",
        certificate_arn=ssl_cert.arn,
        validation_record_fqdns=[ssl_cert_validation_dns_record.fqdn],
        opts=ResourceOptions(provider=aws_us_east_1))
    # Configure API Gateway to be able to use domain name & certificate
    api_domain_name = aws.apigateway.DomainName(
        "api-domain-name",
        certificate_arn=validated_ssl_certificate.certificate_arn,
        domain_name=domain)
    # Create DNS record
    aws.route53.Record("api-dns",
                       zone_id=zone_id,
                       type="A",
                       name=domain,
                       aliases=[
                           aws.route53.RecordAliasArgs(
                               name=api_domain_name.cloudfront_domain_name,
                               evaluate_target_health=False,
                               zone_id=api_domain_name.cloudfront_zone_id)
                       ])
    return api_domain_name
Example #11
 def _create_public_subnet_route_table(self, vpcid):
     # create the public subnet for the NAT
     ig_name = "%s-ig" % self.name
     internet_gateway = ec2.InternetGateway(
         ig_name,
         vpc_id=vpcid,
         tags=self.vpc_tags,
         __opts__=ResourceOptions(parent=self))
     rt_name = "%s-public-rt" % self.name
     public_route_table = ec2.RouteTable(
         rt_name,
         vpc_id=vpcid,
         routes=[{
             "cidrBlock": "0.0.0.0/0",
             "gatewayId": internet_gateway.id
         }],
         __opts__=ResourceOptions(parent=self))
     return public_route_table.id
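The route is passed as a camelCase dict, which older pulumi_aws releases expected; newer releases also accept typed args classes. A hedged sketch of an equivalent route table using aws.ec2.RouteTableRouteArgs (resource names here are illustrative):

import pulumi_aws as aws

vpc = aws.ec2.Vpc("example-vpc", cidr_block="10.0.0.0/16")
igw = aws.ec2.InternetGateway("example-ig", vpc_id=vpc.id)
public_rt = aws.ec2.RouteTable(
    "example-public-rt",
    vpc_id=vpc.id,
    routes=[
        aws.ec2.RouteTableRouteArgs(
            cidr_block="0.0.0.0/0",
            gateway_id=igw.id,
        )
    ],
)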
Example #12
    def _create_public_subnet(self, vpcid, public_route_table_id, azid):
        subnet_name = "%s-%d-public-subnet" % (self.name, azid)
        az_id = self._get_az(azid)
        subnet = ec2.Subnet(subnet_name,
                            availability_zone_id=az_id,
                            cidr_block=("10.0.%d.0/24" % azid),
                            vpc_id=vpcid,
                            tags=self.vpc_tags,
                            map_public_ip_on_launch=True,
                            __opts__=ResourceOptions(parent=self))

        prta_name = "%s-rt-assoc" % subnet_name
        public_route_table_association = ec2.RouteTableAssociation(
            prta_name,
            route_table_id=public_route_table_id,
            subnet_id=subnet.id,
            __opts__=ResourceOptions(parent=self))
        return subnet.id
Example #13
 def _provision_deployment_subnet(self, protect=False):
     deploy_subnet = networking.Subnet(
         self.props.deploy_network["subnet_name"],
         name=self.props.deploy_network["subnet_name"],
         network_id=self.resources.deploy_network.id,
         cidr=self.props.deploy_network["cidr"],
         ip_version=4,
         opts=ResourceOptions(delete_before_replace=True, protect=protect),
     )
     return deploy_subnet
Example #14
 def _create_private_subnet_route_table(self, public_subnet_id, vpcid):
     eip_name = "%s-nat-eip" % self.name
     nat_name = "%s-nat" % self.name
     eip = ec2.Eip(eip_name, __opts__=ResourceOptions(parent=self))
     nat_gateway = ec2.NatGateway(nat_name,
                                  subnet_id=public_subnet_id,
                                  allocation_id=eip.id,
                                  tags=self.vpc_tags,
                                  __opts__=ResourceOptions(parent=self))
     rt_name = "%s-private-rt" % self.name
     private_route_table = ec2.RouteTable(
         rt_name,
         vpc_id=vpcid,
         routes=[{
             "cidrBlock": "0.0.0.0/0",
             "gatewayId": nat_gateway.id
         }],
         __opts__=ResourceOptions(parent=self))
     return private_route_table.id
Example #15
    def _create_compute_iam_roles(self, name):
        # According to AWS docs, this trust policy is required for the masters & the agents
        # TODO: can we curl for this & check if it's different? Use the updated one & log if different.
        # Note: multi line string requires open bracket here. Adding a newline results in a malformed policy doc
        mrp = """{
"Version": "2012-10-17",
"Statement": [
    {
        "Effect": "Allow",
        "Principal": {
            "Service": "eks.amazonaws.com"
        },
        "Action": "sts:AssumeRole"
    }
]
}"""
        # Trust policy for the worker role
        wrp = """{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Principal": {
        "Service": "ec2.amazonaws.com"
      },
      "Action": "sts:AssumeRole"
    }
  ]
}"""

        policy_arn_string = "arn:aws:iam::aws:policy/"

        eks_master_role = iam.Role("eks-service-role", name="%s-master-role" % name, description="role for eks service", assume_role_policy=mrp,
                                   __opts__=ResourceOptions(parent=self))
        eks_worker_role = iam.Role("eks-service-worker-role", name="%s-worker-role" % name, description="role for eks worker nodes", assume_role_policy=wrp,
                                   __opts__=ResourceOptions(parent=self))
        eks_worker_instance_profile = iam.InstanceProfile("eks_worker_instance_profile", name="%s-instance-profile" % name,
                                                          role=eks_worker_role.id, __opts__=ResourceOptions(parent=self))

        # attach required policies to the master plane
        d1 = iam.PolicyAttachment("policy-AmazonEKSClusterPolicy", policy_arn="%sAmazonEKSClusterPolicy" % policy_arn_string, roles=[eks_master_role.id],
                             __opts__=ResourceOptions(parent=self))
        d2 = iam.PolicyAttachment("policy-AmazonEKSServicePolicy", policy_arn="%sAmazonEKSServicePolicy" % policy_arn_string, roles=[eks_master_role.id],
                             __opts__=ResourceOptions(parent=self))

        # attach required policies to the worker nodes
        iam.PolicyAttachment("policy-AmazonEKSWorkerNodePolicy", policy_arn="%sAmazonEKSWorkerNodePolicy" % policy_arn_string, roles=[eks_worker_role.id],
                             __opts__=ResourceOptions(parent=self))
        iam.PolicyAttachment("policy-AmazonEKS_CNI_Policy", policy_arn="%sAmazonEKS_CNI_Policy" % policy_arn_string, roles=[eks_worker_role.id],
                             __opts__=ResourceOptions(parent=self))
        iam.PolicyAttachment("policy-AmazonEC2ContainerRegistryReadOnly", policy_arn="%sAmazonEC2ContainerRegistryReadOnly" % policy_arn_string, roles=[eks_worker_role.id],
                             __opts__=ResourceOptions(parent=self))

        self.eks_master_role = eks_master_role.arn
        self.eks_worker_role = eks_worker_role
        self.cluster_role_attachment_dependencies = [d1, d2]
        self.eks_worker_instance_profile = eks_worker_instance_profile.name
Example #16
 def _provision_deployment_network(self, protect=False):
     deploy_network = networking.Network(
         self.props.deploy_network["name"],
         opts=ResourceOptions(delete_before_replace=True, protect=protect),
     )
     pulumi.export(
         "DeploymentNetwork",
         Output.all(deploy_network.name, deploy_network.id).apply(
             lambda args: f"{args[0]} ({args[1]})"
         ),
     )
     return deploy_network
Example #17
 def _provision_private_networks(self):
     private_networks = {}
     for props in self.props.private_networks:
         network = networking.Network("private-network-" + props["name"])
         subnet = networking.Subnet(
             "subnet-" + props["name"],
             network_id=network.id,
             cidr=props["cidr"],
             ip_version=4,
             opts=ResourceOptions(delete_before_replace=True),
         )
         networking.RouterInterface(
             "router-interface-" + props["name"],
             router_id=self.resources.private_router.id,
             subnet_id=subnet.id,
             opts=ResourceOptions(delete_before_replace=True),
         )
         private_networks[props["name"]] = {
             "network": network,
             "subnet": subnet,
             "vlan_id": props["vlan_id"],
         }
     return private_networks
Example #18
    def _create_private_subnet(self, vpcid, private_route_table_id, azid):
        if private_route_table_id is None:
            raise RunError(
                "attempting to create a private subnet without a private subnet route table"
            )

        subnet_name = "%s-%d-private-subnet" % (self.name, azid)
        az_id = self._get_az(azid)
        subnet = ec2.Subnet(subnet_name,
                            availability_zone_id=az_id,
                            cidr_block=("10.0.%d.0/24" % azid),
                            vpc_id=vpcid,
                            tags=self.vpc_tags,
                            map_public_ip_on_launch=False,
                            __opts__=ResourceOptions(parent=self))

        prta_name = "%s-rt-assoc" % subnet_name
        private_route_table_assocation = ec2.RouteTableAssociation(
            prta_name,
            route_table_id=private_route_table_id,
            subnet_id=subnet.id,
            __opts__=ResourceOptions(parent=self))
        return subnet.id
Example #19
    def _create_sgs(self, bastion_id=None):
        # TODO: if infra is left up for a while, security groups can't be deleted. Are they modified while running? Need a tag?

        # Create the security groups first
        master_sg = ec2.SecurityGroup("master-sg", vpc_id=self.vpc_id, description="security group for communication with the eks master plane",
                                      __opts__=ResourceOptions(parent=self))
        worker_sg = ec2.SecurityGroup("worker-sg", vpc_id=self.vpc_id, description="security group for communication with the worker nodes",
                                      __opts__=ResourceOptions(parent=self))

        # Create the egress/ingress rules for the master
        master_sg_egress = ec2.SecurityGroupRule("master-sg-egress", type="egress", cidr_blocks=["0.0.0.0/0"], from_port=0,
                                                 to_port=0, protocol=-1, security_group_id=master_sg.id, description="master sg egress",
                                                 __opts__=ResourceOptions(parent=self))
        current_ip = Util.get_workstation_ip()
        master_sg_ingress_workstation = ec2.SecurityGroupRule("master-sg-ingress-from-workstation", type="ingress", from_port=443, to_port=443,
                                                              protocol=-1, security_group_id=master_sg.id, cidr_blocks=["%s/32" % current_ip],
                                                              description="ingress to masters from workstation", __opts__=ResourceOptions(parent=self))
        master_sg_ingress_nodes = ec2.SecurityGroupRule("master-sg-ingress-from-workers", type="ingress", from_port=0, to_port=0,
                                                        protocol=-1, security_group_id=master_sg.id, source_security_group_id=worker_sg.id,
                                                        description="master ingress from workers", __opts__=ResourceOptions(parent=self))

        # Create the egress/ingress rules for the workers
        worker_sg_egress = ec2.SecurityGroupRule("worker-sg-egress", type="egress", cidr_blocks=["0.0.0.0/0"], from_port=0,
                                             to_port=0, protocol=-1, security_group_id=worker_sg.id, description="worker sg egress",
                                             __opts__=ResourceOptions(parent=self))
        worker_sg_ingress_itself = ec2.SecurityGroupRule("worker-sg-ingress-itself", type="ingress", from_port=0, to_port=0,
                                                         protocol=-1, security_group_id=worker_sg.id, self=True, description="worker ingress from itself",
                                                         __opts__=ResourceOptions(parent=self))
        worker_sg_ingress_master = ec2.SecurityGroupRule("worker-sg-ingress-master", type="ingress", from_port=0, to_port=0,
                                                         protocol=-1, security_group_id=worker_sg.id, source_security_group_id=master_sg.id,
                                                         description="worker ingress from master", __opts__=ResourceOptions(parent=self))
        worker_sg_ingress_bastion = ec2.SecurityGroupRule("worker-sg-ingress-bastion", type="ingress", from_port=0, to_port=0,
                                                          protocol=-1, security_group_id=worker_sg.id, source_security_group_id=bastion_id,
                                                          description="worker ingress from bastion host", __opts__=ResourceOptions(parent=self))

        self.master_sg = master_sg.id
        self.worker_sg = worker_sg.id
Example #20
 def _provision_reserved_names(self):
     for r in self.props.reserved_ips:
         ipaddr, hostname = r["ip"], r["hostname"]
         self._provision_dns_record(hostname, ipaddr)
         networking.Port(
             "reserved-port-" + ipaddr,
             network_id=self.resources.mgmt_network.id,
             fixed_ips=[
                 networking.PortFixedIpArgs(
                     subnet_id=self.resources.mgmt_subnet.id,
                     ip_address=ipaddr,
                 )
             ],
             opts=ResourceOptions(delete_before_replace=True),
         )
Example #21
 def _provision_vsanwiteness_helper(self):
     props = self.props.helper_vsanwitness
     compute.Instance(
         "helper-vsanwitness",
         name="helper-vsanwitness",
         flavor_name=props["flavor_name"],
         image_name=props["image_name"],
         availability_zone=props["availability_zone"],
         networks=[
             compute.InstanceNetworkArgs(name=self.resources.deploy_network.name),
             compute.InstanceNetworkArgs(name=self.resources.private_networks["vsanwitness"]["network"].name),
         ],
         key_pair=self.resources.keypair.name,
         opts=ResourceOptions(
             delete_before_replace=True,
             ignore_changes=["image_name", "key_pair"],
         ),
     )
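ignore_changes tells the engine to skip diffs on the listed inputs, which is why later image_name and key_pair updates will not force this instance to be replaced. A minimal, self-contained sketch of the option (pulumi_random used purely for illustration):

from pulumi import ResourceOptions
import pulumi_random as random

# Later edits to `prefix` in the program are ignored by the engine.
pet = random.RandomPet(
    "ignore-changes-demo",
    prefix="demo",
    opts=ResourceOptions(ignore_changes=["prefix"]),
)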
Example #22
# Copyright 2016-2021, Pulumi Corporation.  All rights reserved.

"""Provisions Apache via a Helm chart onto an AKS cluster created in
`cluster.py`.

"""

import pulumi
from pulumi.resource import ResourceOptions
from pulumi_kubernetes.helm.v3 import Chart, ChartOpts

import cluster


apache = Chart('apache-chart',
    ChartOpts(
        chart='apache',
        version='8.3.2',
        fetch_opts={'repo': 'https://charts.bitnami.com/bitnami'}),
    ResourceOptions(provider=cluster.k8s_provider))


apache_service_ip = apache.get_resource('v1/Service', 'apache-chart').apply(
    lambda res: res.status.load_balancer.ingress[0].ip)


pulumi.export('cluster_name', cluster.k8s_cluster.name)
pulumi.export('kubeconfig', cluster.kubeconfig)
pulumi.export('apache_service_ip', apache_service_ip)
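The fetch_opts dict can also be passed as the typed FetchOpts class from the same module; a sketch assuming the same cluster.py as above:

from pulumi.resource import ResourceOptions
from pulumi_kubernetes.helm.v3 import Chart, ChartOpts, FetchOpts

import cluster

apache = Chart('apache-chart',
    ChartOpts(
        chart='apache',
        version='8.3.2',
        fetch_opts=FetchOpts(repo='https://charts.bitnami.com/bitnami')),
    ResourceOptions(provider=cluster.k8s_provider))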
Example #23
    def __init__(
        self,
        name,
        vpc_environment: VPC,
        efs_environment: EFS,
        github_repo_name: Input[str],
        github_version_name: Input[str] = None,
        opts=None,
    ):
        super().__init__("nuage:aws:DevelopmentEnvironment:CodeBuild",
                         f"{name}CodebuildEnvironment", None, opts)

        # TODO pass this in - with a default?
        def get_codebuild_serice_role_policy():
            return {
                "Version": "2012-10-17",
                "Statement": [{
                    "Action": "*",
                    "Effect": "Allow",
                    "Resource": "*"
                }]
            }

        account_id = get_caller_identity().account_id

        # TODO: add random chars to the end of the default name to prevent conflicts
        project_name = f"{name}BuildDeploy"

        pulumi_token_param = ssm.Parameter(f"{name}PulumiAccessToken",
                                           type="SecureString",
                                           value="none")

        codebuild_vpc_policy = iam.Policy(
            f"{name}CodeBuildVpcPolicy",
            policy=get_codebuild_vpc_policy(
                account_id,
                vpc_environment.private_subnet.id).apply(json.dumps))

        codebuild_base_policy = iam.Policy(f"{name}CodeBuildBasePolicy",
                                           policy=json.dumps(
                                               get_codebuild_base_policy(
                                                   account_id, project_name)))

        codebuild_service_role_policy = iam.Policy(
            f"{name}CodeBuildServiceRolePolicy",
            policy=json.dumps(get_codebuild_serice_role_policy()))

        codebuild_service_role = iam.Role(f"{name}CodeBuildRole",
                                          assume_role_policy="""{
        "Version": "2012-10-17",
        "Statement": [
            {
            "Effect": "Allow",
            "Principal": {
                "Service": "codebuild.amazonaws.com"
            },
            "Action": "sts:AssumeRole"
            }
        ]
        }""")

        codebuild_vpn_policy_attach = iam.PolicyAttachment(
            f"{name}CodeBuildVpnAttachment",
            policy_arn=codebuild_vpc_policy.arn,
            roles=[codebuild_service_role.name])

        codebuild_base_policy_attach = iam.PolicyAttachment(
            f"{name}CodeBuildBaseAttachment",
            policy_arn=codebuild_base_policy.arn,
            roles=[codebuild_service_role.name])

        codebuild_service_role_policy_attach = iam.PolicyAttachment(
            f"{name}CodeBuildServiceRoleAttachment",
            policy_arn=codebuild_service_role_policy.arn,
            roles=[codebuild_service_role.name])

        codebuild_project = codebuild.Project(
            f"{name}CodeBuildProject",
            description="Builds and deploys the stack",
            name=project_name,
            vpc_config={
                "vpc_id": vpc_environment.vpc.id,
                "subnets": [vpc_environment.private_subnet],
                "security_group_ids": [vpc_environment.security_group.id]
            },
            source={
                "type": "GITHUB",
                "location": github_repo_name
            },
            source_version=github_version_name,
            artifacts={"type": "NO_ARTIFACTS"},
            environment={
                "image":
                "aws/codebuild/amazonlinux2-x86_64-standard:2.0",
                "privileged_mode":
                True,
                "type":
                "LINUX_CONTAINER",
                "compute_type":
                "BUILD_GENERAL1_SMALL",
                "environment_variables": [{
                    "name": "PULUMI_ACCESS_TOKEN",
                    "type": "PARAMETER_STORE",
                    "value": pulumi_token_param.name
                }, {
                    "name":
                    "FILESYSTEM_ID",
                    "type":
                    "PLAINTEXT",
                    "value":
                    efs_environment.file_system_id
                }]
            },
            service_role=codebuild_service_role.arn,
            opts=ResourceOptions(depends_on=[vpc_environment]))

        outputs = {"pulumi_token_param_name": pulumi_token_param.name}

        self.set_outputs(outputs)
Example #24
class Random(Resource):
    def __init__(self,
                 name: str,
                 length: int,
                 opts: Optional[ResourceOptions] = None):
        props = {"length": length, "result": None}
        self.length = length
        Resource.__init__(self, "testprovider:index:Random", name, True, props,
                          opts)
        print(props)


class RandomProvider(Provider):
    def __init__(self, opts: Optional[ResourceOptions] = None):
        Provider.__init__(self, "testprovider", "provider", None, opts)


example_url = ResourceOptions(plugin_download_url="get.com")
provider_url = ResourceOptions(plugin_download_url="get.pulumi/test/providers")

# Create resource with specified PluginDownloadURL
r = Random("default", length=10, opts=example_url)
export("default provider", r.result)

# Create provider with specified PluginDownloadURL
provider = RandomProvider(provider_url)
# Create resource that inherits the providers PluginDownloadURL
e = Random("provided", length=8, opts=ResourceOptions(provider=provider))

export("explicit provider", e.result)
Example #25
    def __init__(
        self,
        name,
        should_create_gtm_tag=True,
        site_name: Input[str] = None,
        site_url: Input[str] = None,
        opts=None,
    ):
        """
        :param should_create_gtm_tag: Whether or not a GTM environment should be created
                with a tag for calling Amplify and Google Analytics.
        :param site_name: The website name used for the Google Analytics property.  If
                `should_create_gtm_tag` is `True`, this is required.
        :param site_url: The website URL used for the Google Analytics property.  If
                `should_create_gtm_tag` is `True`, this is required.
        """
        super().__init__("nuage:aws:Analytics", name, None, opts)

        account_id = get_caller_identity().account_id
        region = config.region

        bucket = s3.Bucket(f"{name}Bucket")

        firehose_role = iam.Role(
            f"{name}FirehoseRole",
            assume_role_policy=get_firehose_role_trust_policy_document(account_id),
        )

        delivery_stream = kinesis.FirehoseDeliveryStream(
            f"{name}DeliveryStream",
            destination="extended_s3",
            extended_s3_configuration={
                "bucketArn": bucket.arn,
                "role_arn": firehose_role.arn,
                "compressionFormat": "GZIP",
            },
            opts=ResourceOptions(depends_on=[bucket, firehose_role]),
        )

        firehose_role_policy = iam.RolePolicy(
            f"{name}DeliveryStreamPolicy",
            role=firehose_role.name,
            policy=get_firehose_role_policy_document(
                region, account_id, bucket.arn, delivery_stream.name
            ).apply(json.dumps),
        )

        pinpoint_app = pinpoint.App(f"{name}PinpointApp")

        pinpoint_stream_role = iam.Role(
            f"{name}PinpointStreamRole",
            assume_role_policy=get_pinpoint_stream_role_trust_policy_document(),
        )

        pinpoint_stream_role_policy = iam.RolePolicy(
            f"{name}PinpointStreamPolicy",
            role=pinpoint_stream_role.name,
            policy=get_pinpoint_stream_role_policy_document(
                region, account_id, delivery_stream.name, pinpoint_app.application_id
            ).apply(json.dumps),
            opts=ResourceOptions(depends_on=[pinpoint_stream_role, delivery_stream]),
        )

        # IAM roles can take time to propagate so we have to add an artificial delay
        pinpoint_stream_role_delay = Delay(
            "EventStreamRoleDelay",
            10,
            opts=ResourceOptions(depends_on=[pinpoint_stream_role_policy]),
        )

        pinpoint_stream = pinpoint.EventStream(
            f"{name}PinpointEventStream",
            application_id=pinpoint_app.application_id,
            destination_stream_arn=delivery_stream.arn,
            role_arn=pinpoint_stream_role.arn,
            opts=ResourceOptions(
                depends_on=[delivery_stream, pinpoint_app, pinpoint_stream_role_delay,]
            ),
        )

        outputs = {
            "bucket_name": bucket.id,
            "delivery_stream_name": delivery_stream.name,
            "destination_stream_arn": delivery_stream.arn,
            "pinpoint_application_name": pinpoint_app.name,
            "pinpoint_application_id": pinpoint_app.application_id,
            "gtm_container_id": None,
            "gtm_tag": None,
            "gtm_tag_no_script": None,
            "amplify_tag_id": None,
            "event_name": None,
        }

        if should_create_gtm_tag:

            if site_name is None:
                raise Exception("The site_name parameter is required for the GTM tag")

            if site_url is None:
                raise Exception("The site_url parameter is required for the GTM tag")

            gtm = GtmAnalytics(name, site_name, site_url)

            outputs = {
                **outputs,
                "gtm_container_id": gtm.container_id,
                "gtm_tag": gtm.tag,
                "gtm_tag_no_script": gtm.tag_no_script,
                "amplify_tag_id": gtm.amplify_tag_id,
                "event_name": gtm.event_name,
            }

        self.set_outputs(outputs)
Example #26
    def __init__(self,
                 name,
                 port_list=None,
                 subnet_count=0,
                 vpc_tags=None,
                 sg_tags=None,
                 private_subnets=None,
                 security_group_ids=None,
                 public_subnets=None):
        ComponentResource.__init__(
            self, "aws:network:dtd", name, {
                "number_of_availability_zones": subnet_count,
                "use_private_subnets": True,
                "subnet_ids": private_subnets,
                "security_group_ids": security_group_ids,
                "public_subnet_ids": public_subnets
            }, None)

        self.name = name
        self.port_list = port_list
        self.subnet_count = subnet_count
        self.vpc_tags = vpc_tags
        self.sg_tags = sg_tags
        self.public_subnets = []
        self.private_subnets = []
        self.security_group_ids = []
        self.vpcid = None

        PUBLIC_SUBNET_COUNT = 0

        if subnet_count < 2 or subnet_count > 3:
            raise RunError(
                "Unsupported amount of subnets! 2 or 3 supported. %d entered" %
                subnet_count)

        # create the VPC
        vpc = ec2.Vpc(name,
                      cidr_block="10.0.0.0/16",
                      enable_dns_hostnames=True,
                      enable_dns_support=True,
                      tags=vpc_tags,
                      __opts__=ResourceOptions(parent=self))

        self.vpcid = vpc.id

        public_route_table_id = self._create_public_subnet_route_table(vpc.id)
        private_route_table_id = None

        # create the subnets
        for i in range(subnet_count):
            # create public subnet(s) first
            if i <= PUBLIC_SUBNET_COUNT:
                self.public_subnets.append(
                    self._create_public_subnet(vpc.id, public_route_table_id,
                                               i))
            # create private subnet(s) next
            else:
                # do create the private route table, eip & NAT just once
                if i == 1:
                    public_subnet_id = self.public_subnets[0]
                    private_route_table_id = self._create_private_subnet_route_table(
                        public_subnet_id, vpc.id)
                self.private_subnets.append(
                    self._create_private_subnet(vpc.id, private_route_table_id,
                                                i))

        self.security_group_ids = self._create_security_groups(vpc.id)
        self.public_sg = self.security_group_ids['public']
        self.private_sg = self.security_group_ids['private']

        # This does not work because the items in the dictionary are of type Output
        # for k in all_security_group_ids:
        #     print(k)
        #     if "public" in k:
        #         self.public_security_groups.append(all_security_group_ids[k])
        #     elif "private" in k:
        #         self.private_security_groups.append(all_security_group_ids[k])
        # self.security_group_ids = list(all_security_group_ids)

        # this may be unnecessary - it is a nice to have for the UI for now
        self.register_outputs({
            "vpc_id": vpc.id,
            "private_subnet_ids": self.private_subnets,
            "public_subnet_ids": self.public_subnet_ids,
            "security_group_ids": self.security_group_ids
        })
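On the commented-out loop above: the dictionary values are Outputs, so they cannot be inspected with plain string operations at program time. One way to partition them is to resolve them together with Output.all and do the string matching inside .apply; a hedged sketch:

import pulumi

def split_security_groups(all_security_group_ids: dict) -> pulumi.Output:
    # Resolve every Output value together, then branch on the (plain) dict keys.
    keys = list(all_security_group_ids.keys())
    return pulumi.Output.all(*all_security_group_ids.values()).apply(
        lambda values: {
            "public": [v for k, v in zip(keys, values) if "public" in k],
            "private": [v for k, v in zip(keys, values) if "private" in k],
        }
    )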
Example #27
    def _create_security_groups(self, vpcid):
        pub_name = "%s-public-sg" % self.name
        public_sg = ec2.SecurityGroup(pub_name,
                                      description=pub_name,
                                      vpc_id=vpcid,
                                      tags=self.sg_tags,
                                      __opts__=ResourceOptions(parent=self))

        priv_name = "%s-private-sg" % self.name
        private_sg = ec2.SecurityGroup(priv_name,
                                       description=priv_name,
                                       vpc_id=vpcid,
                                       tags=self.sg_tags,
                                       __opts__=ResourceOptions(parent=self))
        """
        Set up public rules:
            1. ingress from itself to itself
            2. ingress from private to public
            3. egress rule for all
            4. ingress rule for current IP address on 22
        """
        pub_ingress_itself = ec2.SecurityGroupRule(
            "public-ingress-from-itself",
            type="ingress",
            from_port=0,
            to_port=0,
            protocol=-1,
            security_group_id=public_sg.id,
            self=True,
            description="public ingress to/from itself",
            __opts__=ResourceOptions(parent=self))

        pub_ingress_private = ec2.SecurityGroupRule(
            "public-ingress-from-private",
            type="ingress",
            from_port=0,
            to_port=0,
            protocol=-1,
            security_group_id=public_sg.id,
            source_security_group_id=private_sg.id,
            description="public ingress from private",
            __opts__=ResourceOptions(parent=self))

        pub_egress = ec2.SecurityGroupRule(
            "public-egress",
            type="egress",
            cidr_blocks=["0.0.0.0/0"],
            from_port=0,
            to_port=0,
            protocol=-1,
            security_group_id=public_sg.id,
            description="egress traffic from public sg",
            __opts__=ResourceOptions(parent=self))

        current_ip = Util.get_workstation_ip()
        pub_ingress_current_ip = ec2.SecurityGroupRule(
            "public-ingress-from-current-ip",
            type="ingress",
            from_port=22,
            to_port=22,
            protocol="TCP",
            security_group_id=public_sg.id,
            cidr_blocks=[("%s/32" % current_ip)],
            description="ingress from current IP",
            __opts__=ResourceOptions(parent=self))
        """
        Set up private rules:
            1. ingress from public to it
            2. ingress from itself to itself
            3. egress rule for all
        """
        priv_ingress_itself = ec2.SecurityGroupRule(
            "private-ingress-from-itself",
            type="ingress",
            from_port=0,
            to_port=0,
            protocol=-1,
            security_group_id=private_sg.id,
            self=True,
            description="private ingress to itself",
            __opts__=ResourceOptions(parent=self))

        priv_ingress_public = ec2.SecurityGroupRule(
            "private-ingress-from-public",
            type="ingress",
            from_port=0,
            to_port=0,
            protocol=-1,
            security_group_id=private_sg.id,
            source_security_group_id=public_sg.id,
            description="private ingress from public",
            __opts__=ResourceOptions(parent=self))

        priv_egress = ec2.SecurityGroupRule(
            "private-egress",
            type="egress",
            cidr_blocks=["0.0.0.0/0"],
            from_port=0,
            to_port=0,
            protocol=-1,
            security_group_id=private_sg.id,
            description="egress traffic from private sg",
            __opts__=ResourceOptions(parent=self))

        return {"public": public_sg.id, "private": private_sg.id}
Example #28
    def __init__(self, name, opts=None):
        super().__init__("nuage:aws:Analytics", name, None, opts)

        account_id = get_caller_identity().account_id
        region = config.region

        bucket = s3.Bucket(f"{name}Bucket")

        firehose_role = iam.Role(
            f"{name}FirehoseRole",
            assume_role_policy=get_firehose_role_trust_policy_document(account_id),
        )

        delivery_stream = kinesis.FirehoseDeliveryStream(
            f"{name}DeliveryStream",
            destination="extended_s3",
            extended_s3_configuration={
                "bucketArn": bucket.arn,
                "role_arn": firehose_role.arn,
                "compressionFormat": "GZIP",
            },
            opts=ResourceOptions(depends_on=[bucket, firehose_role]),
        )

        firehose_role_policy = iam.RolePolicy(
            f"{name}DeliveryStreamPolicy",
            role=firehose_role.name,
            policy=get_firehose_role_policy_document(
                region, account_id, bucket.arn, delivery_stream.name
            ).apply(json.dumps),
        )

        pinpoint_app = pinpoint.App(f"{name}PinpointApp")

        pinpoint_stream_role = iam.Role(
            f"{name}PinpointStreamRole",
            assume_role_policy=get_pinpoint_stream_role_trust_policy_document(),
        )

        pinpoint_stream_role_policy = iam.RolePolicy(
            f"{name}PinpointStreamPolicy",
            role=pinpoint_stream_role.name,
            policy=get_pinpoint_stream_role_policy_document(
                region, account_id, delivery_stream.name, pinpoint_app.application_id
            ).apply(json.dumps),
            opts=ResourceOptions(depends_on=[pinpoint_stream_role, delivery_stream]),
        )

        # IAM roles can take time to propagate so we have to add an artificial delay
        pinpoint_stream_role_delay = Delay(
            "EventStreamRoleDelay",
            10,
            opts=ResourceOptions(depends_on=[pinpoint_stream_role_policy]),
        )

        pinpoint_stream = pinpoint.EventStream(
            f"{name}PinpointEventStream",
            application_id=pinpoint_app.application_id,
            destination_stream_arn=delivery_stream.arn,
            role_arn=pinpoint_stream_role.arn,
            opts=ResourceOptions(
                depends_on=[delivery_stream, pinpoint_app, pinpoint_stream_role_delay,]
            ),
        )

        self.set_outputs(
            {
                "bucket_name": bucket.id,
                "delivery_stream_name": delivery_stream.name,
                "destination_stream_arn": delivery_stream.arn,
                "pinpoint_application_name": pinpoint_app.name,
                "pinpoint_application_id": pinpoint_app.application_id,
            }
        )
Example #29
def main() -> None:
    pulumi_config = pulumi.Config()
    artifacts = ArtifactGetter.from_config(pulumi_config)

    # These tags will be added to all provisioned infrastructure
    # objects.
    register_auto_tags({
        "pulumi:project": pulumi.get_project(),
        "pulumi:stack": config.STACK_NAME
    })

    upstream_stacks: Optional[UpstreamStacks] = None
    nomad_provider: Optional[pulumi.ProviderResource] = None
    consul_provider: Optional[pulumi.ProviderResource] = None
    if not config.LOCAL_GRAPL:
        upstream_stacks = UpstreamStacks()
        nomad_provider = get_nomad_provider_address(
            upstream_stacks.nomad_server)
        # Using get_output instead of require_output so that preview passes.
        # NOTE wimax Feb 2022: Not sure the above is still the case
        consul_master_token_secret_id = upstream_stacks.consul.get_output(
            "consul-master-token-secret-id")
        consul_provider = get_consul_provider_address(
            upstream_stacks.consul, {"token": consul_master_token_secret_id})

    pulumi.export("test-user-name", config.GRAPL_TEST_USER_NAME)
    test_user_password = TestUserPassword()
    pulumi.export("test-user-password-secret-id", test_user_password.secret_id)

    # TODO: temporarily disabled until we can reconnect the ApiGateway to the new
    # web UI.
    # jwt_secret = JWTSecret()

    dynamodb_tables = dynamodb.DynamoDB()

    # TODO: Create these emitters inside the service abstraction if nothing
    # else uses them (or perhaps even if something else *does* use them)
    sysmon_log_emitter = emitter.EventEmitter("sysmon-log")
    osquery_log_emitter = emitter.EventEmitter("osquery-log")
    unid_subgraphs_generated_emitter = emitter.EventEmitter(
        "unid-subgraphs-generated")
    subgraphs_generated_emitter = emitter.EventEmitter("subgraphs-generated")
    subgraphs_merged_emitter = emitter.EventEmitter("subgraphs-merged")
    dispatched_analyzer_emitter = emitter.EventEmitter("dispatched-analyzer")

    analyzer_matched_emitter = emitter.EventEmitter(
        "analyzer-matched-subgraphs")
    pulumi.export("analyzer-matched-subgraphs-bucket",
                  analyzer_matched_emitter.bucket_name)

    all_emitters = [
        sysmon_log_emitter,
        osquery_log_emitter,
        unid_subgraphs_generated_emitter,
        subgraphs_generated_emitter,
        subgraphs_merged_emitter,
        dispatched_analyzer_emitter,
        analyzer_matched_emitter,
    ]

    sysmon_generator_queue = ServiceQueue("sysmon-generator")
    sysmon_generator_queue.subscribe_to_emitter(sysmon_log_emitter)

    osquery_generator_queue = ServiceQueue("osquery-generator")
    osquery_generator_queue.subscribe_to_emitter(osquery_log_emitter)

    node_identifier_queue = ServiceQueue("node-identifier")
    node_identifier_queue.subscribe_to_emitter(
        unid_subgraphs_generated_emitter)

    graph_merger_queue = ServiceQueue("graph-merger")
    graph_merger_queue.subscribe_to_emitter(subgraphs_generated_emitter)

    analyzer_dispatcher_queue = ServiceQueue("analyzer-dispatcher")
    analyzer_dispatcher_queue.subscribe_to_emitter(subgraphs_merged_emitter)

    analyzer_executor_queue = ServiceQueue("analyzer-executor")
    analyzer_executor_queue.subscribe_to_emitter(dispatched_analyzer_emitter)

    engagement_creator_queue = ServiceQueue("engagement-creator")
    engagement_creator_queue.subscribe_to_emitter(analyzer_matched_emitter)

    analyzers_bucket = Bucket("analyzers-bucket", sse=True)
    pulumi.export("analyzers-bucket", analyzers_bucket.bucket)
    model_plugins_bucket = Bucket("model-plugins-bucket", sse=False)
    pulumi.export("model-plugins-bucket", model_plugins_bucket.bucket)

    plugins_bucket = Bucket("plugins-bucket", sse=True)
    pulumi.export("plugins-bucket", plugins_bucket.bucket)

    plugin_buckets = [
        analyzers_bucket,
        model_plugins_bucket,
    ]

    firecracker_s3objs = FirecrackerS3BucketObjects(
        "firecracker-s3-bucket-objects",
        plugins_bucket=plugins_bucket,
        firecracker_assets=FirecrackerAssets(
            "firecracker-assets",
            repository_name=config.cloudsmith_repository_name(),
            artifacts=artifacts,
        ),
    )

    # To learn more about this syntax, see
    # https://docs.rs/env_logger/0.9.0/env_logger/#enabling-logging
    rust_log_levels = ",".join([
        "DEBUG",
        "h2::codec=WARN",
        "hyper=WARN",
        "rusoto_core=WARN",
        "rustls=WARN",
        "serde_xml_rs=WARN",
    ])
    py_log_level = "DEBUG"

    aws_env_vars_for_local = _get_aws_env_vars_for_local()
    pulumi.export("aws-env-vars-for-local", aws_env_vars_for_local)

    # These are shared across both local and prod deployments.
    nomad_inputs: Final[NomadVars] = dict(
        analyzer_bucket=analyzers_bucket.bucket,
        analyzer_dispatched_bucket=dispatched_analyzer_emitter.bucket_name,
        analyzer_dispatcher_queue=analyzer_dispatcher_queue.main_queue_url,
        analyzer_executor_queue=analyzer_executor_queue.main_queue_url,
        analyzer_matched_subgraphs_bucket=analyzer_matched_emitter.bucket_name,
        analyzer_dispatcher_dead_letter_queue=analyzer_dispatcher_queue.dead_letter_queue_url,
        aws_env_vars_for_local=aws_env_vars_for_local,
        aws_region=aws.get_region().name,
        container_images=_container_images(artifacts),
        engagement_creator_queue=engagement_creator_queue.main_queue_url,
        graph_merger_queue=graph_merger_queue.main_queue_url,
        graph_merger_dead_letter_queue=graph_merger_queue.dead_letter_queue_url,
        model_plugins_bucket=model_plugins_bucket.bucket,
        node_identifier_queue=node_identifier_queue.main_queue_url,
        node_identifier_dead_letter_queue=node_identifier_queue.dead_letter_queue_url,
        node_identifier_retry_queue=node_identifier_queue.retry_queue_url,
        osquery_generator_queue=osquery_generator_queue.main_queue_url,
        osquery_generator_dead_letter_queue=osquery_generator_queue.dead_letter_queue_url,
        py_log_level=py_log_level,
        rust_log=rust_log_levels,
        schema_properties_table_name=dynamodb_tables.schema_properties_table.name,
        schema_table_name=dynamodb_tables.schema_table.name,
        session_table_name=dynamodb_tables.dynamic_session_table.name,
        subgraphs_merged_bucket=subgraphs_merged_emitter.bucket_name,
        subgraphs_generated_bucket=subgraphs_generated_emitter.bucket_name,
        sysmon_generator_queue=sysmon_generator_queue.main_queue_url,
        sysmon_generator_dead_letter_queue=sysmon_generator_queue.dead_letter_queue_url,
        test_user_name=config.GRAPL_TEST_USER_NAME,
        unid_subgraphs_generated_bucket=unid_subgraphs_generated_emitter.bucket_name,
        user_auth_table=dynamodb_tables.user_auth_table.name,
        user_session_table=dynamodb_tables.user_session_table.name,
        plugin_registry_kernel_artifact_url=firecracker_s3objs.kernel_s3obj_url,
        plugin_s3_bucket_aws_account_id=config.AWS_ACCOUNT_ID,
        plugin_s3_bucket_name=plugins_bucket.bucket,
    )

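    # Inputs for the short-lived grapl-provision job: the test user's password secret
    # plus the subset of nomad_inputs it needs (_get_subset picks only the listed keys).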
    provision_vars: Final[NomadVars] = {
        "test_user_password_secret_id": test_user_password.secret_id,
        **_get_subset(
            nomad_inputs,
            {
                "aws_env_vars_for_local",
                "aws_region",
                "container_images",
                "py_log_level",
                "schema_properties_table_name",
                "schema_table_name",
                "test_user_name",
                "user_auth_table",
            },
        ),
    }

    nomad_grapl_core_timeout = "5m"

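    # Confluent-managed Kafka cluster, plus dedicated credentials and a consumer group
    # for the e2e test runner; the connection details are exported below.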
    kafka = Kafka(
        "kafka",
        confluent_environment_name=pulumi_config.require(
            "confluent-environment-name"),
    )
    e2e_service_credentials = kafka.service_credentials(
        service_name="e2e-test-runner")

    pulumi.export("kafka-bootstrap-servers", kafka.bootstrap_servers())
    pulumi.export("kafka-e2e-sasl-username",
                  e2e_service_credentials.apply(lambda c: c.api_key))
    pulumi.export("kafka-e2e-sasl-password",
                  e2e_service_credentials.apply(lambda c: c.api_secret))
    pulumi.export("kafka-e2e-consumer-group-name",
                  kafka.consumer_group("e2e-test-runner"))

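    # Deploy the grapl-ingress Nomad job (it takes no job-level variables).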
    nomad_grapl_ingress = NomadJob(
        "grapl-ingress",
        jobspec=path_from_root("nomad/grapl-ingress.nomad").resolve(),
        vars={},
        opts=pulumi.ResourceOptions(provider=nomad_provider),
    )

    ConsulIntentions(
        "consul-intentions",
        # consul-intentions are stored in the nomad directory so that engineers remember to create/update intentions
        # when they update nomad configs
        intention_directory=path_from_root(
            "nomad/consul-intentions").resolve(),
        opts=pulumi.ResourceOptions(provider=consul_provider),
    )

    if config.LOCAL_GRAPL:
        ###################################
        # Local Grapl
        ###################################
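        # Local Grapl runs its Postgres databases as LocalPostgresInstance resources
        # on fixed host ports instead of the managed Postgres used in the AWS branch.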
        organization_management_db = LocalPostgresInstance(
            name="organization-management-db",
            port=5632,
        )

        plugin_registry_db = LocalPostgresInstance(
            name="plugin-registry-db",
            port=5432,
        )

        plugin_work_queue_db = LocalPostgresInstance(
            name="plugin-work-queue-db",
            port=5532,
        )

        pulumi.export("plugin-work-queue-db-hostname",
                      plugin_work_queue_db.hostname)
        pulumi.export("plugin-work-queue-db-port",
                      str(plugin_work_queue_db.port))
        pulumi.export("plugin-work-queue-db-username",
                      plugin_work_queue_db.username)
        pulumi.export("plugin-work-queue-db-password",
                      plugin_work_queue_db.password)

        # TODO: ADD EXPORTS FOR PLUGIN-REGISTRY

        pulumi.export("organization-management-db-hostname",
                      organization_management_db.hostname)
        pulumi.export("organization-management-db-port",
                      str(organization_management_db.port))
        pulumi.export("organization-management-db-username",
                      organization_management_db.username)
        pulumi.export("organization-management-db-password",
                      organization_management_db.password)

        redis_endpoint = f"redis://{config.HOST_IP_IN_NOMAD}:6379"

        pulumi.export("redis-endpoint", redis_endpoint)

        local_grapl_core_vars: Final[NomadVars] = dict(
            organization_management_db_hostname=organization_management_db.hostname,
            organization_management_db_port=str(organization_management_db.port),
            organization_management_db_username=organization_management_db.username,
            organization_management_db_password=organization_management_db.password,
            plugin_registry_db_hostname=plugin_registry_db.hostname,
            plugin_registry_db_port=str(plugin_registry_db.port),
            plugin_registry_db_username=plugin_registry_db.username,
            plugin_registry_db_password=plugin_registry_db.password,
            plugin_work_queue_db_hostname=plugin_work_queue_db.hostname,
            plugin_work_queue_db_port=str(plugin_work_queue_db.port),
            plugin_work_queue_db_username=plugin_work_queue_db.username,
            plugin_work_queue_db_password=plugin_work_queue_db.password,
            redis_endpoint=redis_endpoint,
            **nomad_inputs,
        )

        nomad_grapl_core = NomadJob(
            "grapl-core",
            jobspec=path_from_root("nomad/grapl-core.nomad").resolve(),
            vars=local_grapl_core_vars,
            opts=ResourceOptions(
                custom_timeouts=CustomTimeouts(
                    create=nomad_grapl_core_timeout,
                    update=nomad_grapl_core_timeout,
                )
            ),
        )

        nomad_grapl_provision = NomadJob(
            "grapl-provision",
            jobspec=path_from_root("nomad/grapl-provision.nomad").resolve(),
            vars=provision_vars,
            opts=pulumi.ResourceOptions(depends_on=[nomad_grapl_core.job]),
        )

    else:
        ###################################
        # AWS Grapl
        ###################################
        # We use stack outputs from internally developed projects
        # We assume that the stack names will match the grapl stack name
        assert upstream_stacks, "Upstream stacks previously initialized"
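        # `upstream_stacks` is assumed to wrap pulumi.StackReference objects for the
        # networking and nomad-agents projects; a minimal sketch of that assumption:
        #   networking = pulumi.StackReference(f"networking/{pulumi.get_stack()}")
        #   nomad_agents = pulumi.StackReference(f"nomad-agents/{pulumi.get_stack()}")
        # The require_output calls below read named outputs from those references.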

        vpc_id = upstream_stacks.networking.require_output("grapl-vpc")
        subnet_ids = upstream_stacks.networking.require_output(
            "grapl-private-subnet-ids")
        nomad_agent_security_group_id = upstream_stacks.nomad_agents.require_output(
            "security-group")
        nomad_agent_alb_security_group_id = upstream_stacks.nomad_agents.require_output(
            "alb-security-group")
        nomad_agent_alb_listener_arn = upstream_stacks.nomad_agents.require_output(
            "alb-listener-arn")
        nomad_agent_subnet_ids = upstream_stacks.networking.require_output(
            "nomad-agents-private-subnet-ids")
        nomad_agent_role = aws.iam.Role.get(
            "nomad-agent-role",
            id=upstream_stacks.nomad_agents.require_output("iam-role"),
            # NOTE: It's somewhat odd to set a StackReference as a parent
            opts=pulumi.ResourceOptions(parent=upstream_stacks.nomad_agents),
        )

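        # subnets_to_single_az is assumed to collapse the private subnet IDs into a
        # single availability zone; the Postgres instances below are pinned to it.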
        availability_zone: pulumi.Output[str] = pulumi.Output.from_input(
            subnet_ids).apply(subnets_to_single_az)

        for _bucket in plugin_buckets:
            _bucket.grant_put_permission_to(nomad_agent_role)
            # Analyzer Dispatcher needs to be able to ListObjects on Analyzers
            # Analyzer Executor needs to be able to ListObjects on Model Plugins
            _bucket.grant_get_and_list_to(nomad_agent_role)
        for _emitter in all_emitters:
            _emitter.grant_write_to(nomad_agent_role)
            _emitter.grant_read_to(nomad_agent_role)

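        # Redis cache shared by the services, reachable from the Nomad agents'
        # security group; its endpoint is exported as "redis-endpoint" below.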
        cache = Cache(
            "main-cache",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

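        # One managed Postgres instance per service, all placed in the availability
        # zone derived above.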
        organization_management_postgres = Postgres(
            name="organization-management",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            availability_zone=availability_zone,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        plugin_registry_postgres = Postgres(
            name="plugin-registry",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            availability_zone=availability_zone,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        plugin_work_queue_postgres = Postgres(
            name="plugin-work-queue",
            subnet_ids=subnet_ids,
            vpc_id=vpc_id,
            availability_zone=availability_zone,
            nomad_agent_security_group_id=nomad_agent_security_group_id,
        )

        pulumi.export(
            "organization-management-db-hostname",
            organization_management_postgres.host(),
        )
        pulumi.export(
            "organization-management-db-port",
            organization_management_postgres.port().apply(str),
        )
        pulumi.export(
            "organization-management-db-username",
            organization_management_postgres.username(),
        )
        pulumi.export(
            "organization-management-db-password",
            organization_management_postgres.password(),
        )

        pulumi.export("plugin-work-queue-db-hostname",
                      plugin_work_queue_postgres.host())
        pulumi.export("plugin-work-queue-db-port",
                      plugin_work_queue_postgres.port().apply(str))
        pulumi.export(
            "plugin-work-queue-db-username",
            plugin_work_queue_postgres.username(),
        )
        pulumi.export(
            "plugin-work-queue-db-password",
            plugin_work_queue_postgres.password(),
        )

        pulumi.export("kafka-bootstrap-servers", kafka.bootstrap_servers())
        pulumi.export("redis-endpoint", cache.endpoint)

        prod_grapl_core_vars: Final[NomadVars] = dict(
            # The vars with a leading underscore indicate that the hcl local version of the variable should be used
            # instead of the var version.
            organization_management_db_hostname=organization_management_postgres.host(),
            organization_management_db_port=organization_management_postgres.port().apply(str),
            organization_management_db_username=organization_management_postgres.username(),
            organization_management_db_password=organization_management_postgres.password(),
            plugin_registry_db_hostname=plugin_registry_postgres.host(),
            plugin_registry_db_port=plugin_registry_postgres.port().apply(str),
            plugin_registry_db_username=plugin_registry_postgres.username(),
            plugin_registry_db_password=plugin_registry_postgres.password(),
            plugin_work_queue_db_hostname=plugin_work_queue_postgres.host(),
            plugin_work_queue_db_port=plugin_work_queue_postgres.port().apply(str),
            plugin_work_queue_db_username=plugin_work_queue_postgres.username(),
            plugin_work_queue_db_password=plugin_work_queue_postgres.password(),
            redis_endpoint=cache.endpoint,
            **nomad_inputs,
        )

        nomad_grapl_core = NomadJob(
            "grapl-core",
            jobspec=path_from_root("nomad/grapl-core.nomad").resolve(),
            vars=prod_grapl_core_vars,
            opts=pulumi.ResourceOptions(
                provider=nomad_provider,
                custom_timeouts=CustomTimeouts(
                    create=nomad_grapl_core_timeout,
                    update=nomad_grapl_core_timeout,
                ),
            ),
        )

        nomad_grapl_provision = NomadJob(
            "grapl-provision",
            jobspec=path_from_root("nomad/grapl-provision.nomad").resolve(),
            vars=provision_vars,
            opts=pulumi.ResourceOptions(
                depends_on=[
                    nomad_grapl_core.job,
                ],
                provider=nomad_provider,
            ),
        )

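        # API Gateway that fronts the Nomad agents' ALB listener; it depends on the
        # ingress job so the listener has a backend to route to.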
        api_gateway = ApiGateway(
            "grapl-api-gateway",
            nomad_agents_alb_security_group=nomad_agent_alb_security_group_id,
            nomad_agents_alb_listener_arn=nomad_agent_alb_listener_arn,
            nomad_agents_private_subnet_ids=nomad_agent_subnet_ids,
            opts=pulumi.ResourceOptions(depends_on=[nomad_grapl_ingress.job]),
        )
        pulumi.export("stage-url", api_gateway.stage.invoke_url)

        # Describes resources that should be destroyed/updated between
        # E2E-in-AWS runs.
        pulumi.export(
            "stateful-resource-urns",
            [
                # grapl-core contains our dgraph instances
                nomad_grapl_core.urn,
                # We need to re-provision after we start a new dgraph
                nomad_grapl_provision.urn,
                dynamodb_tables.urn,
            ],
        )

    OpsAlarms(name="ops-alarms")
Exemplo n.º 30
0
    def _provision_helper_vm(self):
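        # Boot-time user data: set rp_filter to loose mode (2), which avoids dropping
        # asymmetric traffic once the second (management) interface is attached below.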
        init_script = r"""#!/bin/bash
echo 'net.ipv4.conf.default.rp_filter = 2' >> /etc/sysctl.conf
echo 'net.ipv4.conf.all.rp_filter = 2' >> /etc/sysctl.conf
/usr/sbin/sysctl -p /etc/sysctl.conf
"""
        sg = compute.SecGroup(
            "helper-vm-sg",
            description="allow ssh",
            rules=[
                compute.SecGroupRuleArgs(
                    cidr="0.0.0.0/0", from_port=22, to_port=22, ip_protocol="tcp"
                )
            ],
        )
        external_port = networking.Port(
            "helper-vm-external-port",
            network_id=self.resources.mgmt_network.id,
            fixed_ips=[
                networking.PortFixedIpArgs(
                    subnet_id=self.resources.mgmt_subnet.id,
                    ip_address=self.props.helper_vm["ip"],
                )
            ],
            security_group_ids=[sg.id],
        )
        helper_vm = compute.Instance(
            "helper-vm",
            name="helper-vm",
            flavor_id=self.props.helper_vm["flavor_id"],
            image_name=self.props.helper_vm["image_name"],
            networks=[
                compute.InstanceNetworkArgs(name=self.props.deploy_network["name"]),
            ],
            key_pair=self.props.keypair_name,
            user_data=init_script,
            opts=ResourceOptions(
                delete_before_replace=True,
                ignore_changes=["image_name"],
            ),
        )
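        # Attach the pre-created management port (with its fixed IP) to the instance
        # after it boots on the deployment network.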
        attach_external_ip = compute.InterfaceAttach(
            "helper-vm-attatch",
            instance_id=helper_vm.id,
            port_id=external_port.id,
            opts=ResourceOptions(delete_before_replace=True, depends_on=[helper_vm]),
        )

        # configure helper vm
        conn_args = ConnectionArgs(
            host=self.props.helper_vm["ip"],
            username="******",
            private_key_file=self.props.private_key_file,
        )
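        # Install PowerShell and VMware PowerCLI on the helper VM over SSH.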
        exec_install_pwsh = RemoteExec(
            "install-powershell",
            host_id=helper_vm.id,
            conn=conn_args,
            commands=[
                "[ ! -f packages-microsoft-prod.deb ] && wget -q https://packages.microsoft.com/config/ubuntu/20.04/packages-microsoft-prod.deb || true",
                "sudo dpkg -i packages-microsoft-prod.deb",
                "sudo apt-get update",
                "echo 'debconf debconf/frontend select Noninteractive' | sudo debconf-set-selections",
                "sudo apt-get install -y -q powershell",
                "pwsh -Command Set-PSRepository -Name 'PSGallery' -InstallationPolicy Trusted",
                "pwsh -Command Install-Module VMware.PowerCLI",
                "pwsh -Command Set-PowerCLIConfiguration -InvalidCertificateAction Ignore -Confirm:0",
                "pwsh -Command Set-PowerCLIConfiguration -Scope User -ParticipateInCEIP 0 -Confirm:0",
            ],
            opts=ResourceOptions(depends_on=[attach_external_ip]),
        )

        # copy rsa key
        CopyFile(
            "copy-rsa-key",
            host_id=helper_vm.id,
            conn=conn_args,
            src=self.props.private_key_file,
            dest="/home/ccloud/esxi_rsa",
            mode="600",
            opts=ResourceOptions(depends_on=[attach_external_ip]),
        )

        # copy from path relative to the project root
        CopyFile(
            "copy-cleanup",
            host_id=helper_vm.id,
            conn=conn_args,
            src="./scripts/cleanup.sh",
            dest="/home/ccloud/cleanup.sh",
            opts=ResourceOptions(depends_on=[attach_external_ip]),
        )
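        # Render config.sh from its Jinja2 template and copy the result onto the VM.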
        with open("./scripts/config.sh") as f:
            template = jinja2.Template(f.read())
            config_script = template.render(
                management_network=self.props.mgmt_network,
            )
            CopyFileFromString(
                "copy-config-sh",
                host_id=helper_vm.id,
                conn=conn_args,
                from_str=config_script,
                dest="/home/ccloud/config.sh",
                opts=ResourceOptions(depends_on=[attach_external_ip]),
            )

        pulumi.export(
            "HelperVM",
            Output.all(
                helper_vm.name, helper_vm.id, external_port.all_fixed_ips[0]
            ).apply(lambda args: f"{args[0]} ({args[1]}, {args[2]})"),
        )