def linking_nodes_diagram():
    """Render the "Simple Website Diagram" with all nodes linked.

    DNS feeds the load balancer, which fans out to a clustered trio of
    webservers; the webservers then connect to the cache and database.
    """
    from diagrams import Cluster, Diagram
    from diagrams.aws.compute import EC2
    from diagrams.aws.network import ELB, Route53
    from diagrams.onprem.database import PostgreSQL  # Would typically use RDS from aws.database
    from diagrams.onprem.inmemory import Redis  # Would typically use ElastiCache from aws.database

    # direction='LR' is already the default; other orientations exist too.
    with Diagram("Simple Website Diagram", direction='LR') as rendered:
        name_service = Route53("DNS")
        balancer = ELB("Load Balancer")
        user_store = PostgreSQL("User Database")
        hot_cache = Redis("Cache")
        with Cluster("Webserver Cluster"):
            fleet = [
                EC2("Webserver 1"),
                EC2("Webserver 2"),
                EC2("Webserver 3"),
            ]
        name_service >> balancer >> fleet
        fleet >> hot_cache
        fleet >> user_store
        # Printing the Diagram object renders it inline in Colab/Jupyter.
        print(rendered)
def generate(name, num_of_instances):
    """Draw an auto-scaled web stack named *name*.

    num_of_instances controls how many EC2 'App' nodes appear inside the
    auto-scaling-group cluster. Traffic flows Users -> Route 53 -> ALB ->
    app instances -> master DB, with a standby DB linked to the master.
    """
    with Diagram(name, direction='TB', show=True):
        with Cluster('Auto Scaling Group'):
            instances = [EC2Instance('App') for _ in range(num_of_instances)]
        primary_db = RDS('DB (Master)')
        standby_db = RDS('DB (Standby)')
        Users('Users') >> Route53('Route 53') >> ALB('ALB') >> instances >> primary_db
        # Undirected edge: standby replicates from the master.
        primary_db - standby_db
def test3():
    """Render a simple web service with a DB cluster to an image file.

    The output filename is derived from this function's own name via the
    current stack frame, rooted at the module-level ``img_dir``.
    """
    out_path = os.path.join(img_dir, sys._getframe().f_code.co_name)
    with Diagram("Simple Web Service with DB Cluster", show=False, filename=out_path):
        entry = Route53("dns")
        service = ECS("service")
        with Cluster("DB Cluster"):
            primary = RDS("master")
            # Undirected edges from the master to both read replicas.
            primary - [RDS("slave1"), RDS("slave2")]
        entry >> service >> primary
def nodes_diagram():
    """Create the "Simple Website Diagram" nodes without drawing edges."""
    with Diagram("Simple Website Diagram") as rendered:
        name_service = Route53("dns")
        balancer = ELB("Load Balancer")
        user_store = PostgreSQL("User Database")
        hot_cache = Redis("Cache")
        fleet = [
            EC2("Webserver 1"),
            EC2("Webserver 2"),
            EC2("Webserver 3"),
        ]
        # Printing the Diagram object renders it inline in Colab/Jupyter.
        print(rendered)
def draw(name: str, *, show: bool = False) -> None:
    """Render a simple services/DB-cluster diagram titled *name*.

    Args:
        name: Diagram title (also used by diagrams to derive the filename).
        show: Whether to open the rendered image when done.
    """
    # Bug fix: `show` was accepted but never forwarded, so Diagram fell
    # back to its default (show=True) and always popped the image open.
    with Diagram(name, show=show):
        dns = Route53()
        lb = ELB()
        with Cluster() as Services:
            web1 = ECS()
            web2 = ECS()
            web3 = ECS()
        with Cluster() as DBCluster:
            userdb = RDS()
            userdb_ro = RDS()
            # Undirected edge: read replica hangs off the primary.
            userdb - [userdb_ro]
        memcached = ElastiCache()
        # NOTE(review): drawing edges to Cluster objects directly is not
        # supported by every diagrams release — confirm against the pinned
        # version, or target member nodes (e.g. web1, userdb) instead.
        dns >> lb >> Services
        Services >> DBCluster
        Services >> memcached
def linking_nodes_diagram():
    """Build the "Simple Website Diagram" and wire its nodes together."""
    # direction='LR' is already the default; other orientations exist too.
    with Diagram("Simple Website Diagram", direction='LR') as diagram:
        name_service = Route53("DNS")
        balancer = ELB("Load Balancer")
        user_store = PostgreSQL("User Database")
        hot_cache = Redis("Cache")
        with Cluster("Webserver Cluster"):
            fleet = [
                EC2("Webserver 1"),
                EC2("Webserver 2"),
                EC2("Webserver 3"),
            ]
        name_service >> balancer >> fleet
        fleet >> hot_cache
        fleet >> user_store
        # Printing the Diagram object renders it inline in Colab/Jupyter.
        print(diagram)
def nodes_diagram():
    """Create the "Simple Website Diagram" nodes (no edges drawn here)."""
    from diagrams import Cluster, Diagram
    from diagrams.aws.compute import EC2
    from diagrams.aws.network import ELB, Route53
    from diagrams.onprem.database import PostgreSQL  # Would typically use RDS from aws.database
    from diagrams.onprem.inmemory import Redis  # Would typically use ElastiCache from aws.database

    with Diagram("Simple Website Diagram") as rendered:
        name_service = Route53("dns")
        balancer = ELB("Load Balancer")
        user_store = PostgreSQL("User Database")
        hot_cache = Redis("Cache")
        fleet = [
            EC2("Webserver 1"),
            EC2("Webserver 2"),
            EC2("Webserver 3"),
        ]
        # Printing the Diagram object renders it inline in Colab/Jupyter.
        print(rendered)
# NOTE(review): this excerpt begins mid-snippet — the enclosing Diagram
# context and the nodes referenced below (users, dns, web_servers,
# Backbone, app_servers, nfs, onprMysql) are defined outside this view.
# Indentation here is reconstructed from a whitespace-mangled source.
with Cluster("Mysql active-active cluster"):
    db_master = onprMysql("userdb")
    # Undirected link between the two active MySQL nodes.
    db_master - [onprMysql("userdb")]
users - dns >> web_servers
web_servers >> Backbone
Backbone >> app_servers
app_servers >> db_master
app_servers >> nfs

# Second snippet: AWS web application topology; show=True opens the image.
with Diagram("AWS web application", show=True):
    users = Users('website/mobile users')
    with Cluster("Ingress"):
        dns = Route53("Route53")
        with Cluster("Cloudfront CDN"):
            s3_content = S3('Shared content')
            cf = CF('Cloudfront CDN')
    with Cluster('VPC'):
        with Cluster("WebProxy AutoScalingGroup (ASG)"):
            web_asg = AutoScaling('ASG')
            web_lb = ELB("NLB")
        with Cluster("Application servers AutoScalingGroup (ASG)"):
            app_asg = AutoScaling('ASG')
            app_lb = ELB("NLB")
        with Cluster("AWS Batch"):
            # NOTE(review): truncated here in this excerpt — cluster body
            # continues in the original file.
# NOTE(review): excerpt begins mid-snippet — the enclosing Diagram context
# and the imports (EKS, EC2, EBS, RDS, S3, Cloudwatch, Route53, ELB,
# Client, Users, GKE, ComputeEngine, PersistentDisk) are outside this view.
with Cluster('Virtual Private Cloud'):
    kube = EKS('Elastic Kubernetes\nService')
    instance_1 = EC2('EC2 Instance 1\n(m5.xlarge)')
    disk_1 = EBS('EBS Disk 1\n(gp2)')
    instance_2 = EC2('EC2 Instance 2\n(m5.xlarge)')
    disk_2 = EBS('EBS Disk 2\n(gp2)')
    # Undirected links: each instance to its disk, EKS to its resources.
    instance_1 - disk_1
    instance_2 - disk_2
    kube - instance_1
    kube - instance_2
    kube - RDS('Amazon RDS\nfor PostgreSQL\n(db.t3.large)')
    kube - S3('S3')
    kube - Cloudwatch('Amazon CloudWatch')
dns = Route53('Route53')
lb = ELB('Elastic Load Balancer')
lb >> kube
dns >> lb
# Both SDK clients and human users resolve through DNS.
[Client('SDKs'), Users('Users')] >> dns

# Second snippet: equivalent topology on GCP; renders gke.png.
with Diagram('Google Cloud Platform resources', show=False, filename='gke', outformat='png'):
    with Cluster('Google Cloud Platform'):
        with Cluster('Virtual Private Cloud'):
            kube = GKE('Google Kubernetes\nEngine')
            instance_1 = ComputeEngine('VM Instance 1\n(n1-standard-4)')
            # NOTE(review): snippet is truncated after this node in this excerpt.
            disk_1 = PersistentDisk('Persistent Disk 1\n(pd-standard)')
# "Simple ECS Service": Route53 records in a hosted zone resolving to an
# ELB in front of an ECS cluster inside a VPC. show=True opens the image.
# NOTE(review): cluster nesting reconstructed from a whitespace-mangled
# source — confirm against the original layout.
from diagrams import Cluster, Diagram
from diagrams.aws.network import VPC, PublicSubnet, PrivateSubnet, Endpoint, ELB, Route53
from diagrams.aws.compute import ECS, ECR, EC2, AutoScaling
from diagrams.aws.management import Cloudwatch

with Diagram("Simple ECS Service", show=True):
    with Cluster("AWS Account"):
        with Cluster("Hosted Zone\nflavio.com"):
            maindomain = Route53("webservices.*.flavio.com")
            secondarydomain = Route53("ws.*.flavio.com")
        with Cluster("ECR"):
            ecr = ECR("Webservices Image")
        with Cluster("VPC"):
            # Standalone node; nothing links to it in this excerpt.
            PrivateSubnet("Private Subnet")
            with Cluster("Loadbalancing"):
                loadbalancer = ELB("Loadbalancer\nEndpoint")
                # Both DNS records resolve to the load balancer.
                [maindomain, secondarydomain] >> loadbalancer
            with Cluster("ECS Cluster"):
                clusterecs = ECS("Webservices-Prod")
                autoscalingclusterecs = AutoScaling("Cluster Scaling")
                ec2 = EC2("EC2 Instances")
from diagrams import Diagram
from diagrams.aws.network import CloudFront, Route53
from diagrams.aws.storage import S3
from diagrams.aws.security import CertificateManager

# Static site: a Route53 zone plus an ACM certificate feed a CloudFront
# distribution that serves content from an S3 bucket.
with Diagram("Static Website Hosting", show=False):
    zone = Route53("Zone")
    cert = CertificateManager("ACM SSL Cert")
    cdn = CloudFront("CDN")
    [zone, cert] >> cdn >> S3("Bucket")
# Infrastructure diagram for ningenme.net, written to
# public/diagrams/diagram. NOTE(review): truncated in this excerpt — the
# "db" cluster body and the edge wiring come later in the original file.
from diagrams import Cluster, Diagram, Edge
from diagrams.onprem.client import User
from diagrams.aws.compute import ECS
from diagrams.aws.storage import S3
from diagrams.aws.network import Route53, CloudFront, ALB
from diagrams.aws.database import RDS
from diagrams.aws.devtools import Codedeploy
from diagrams.onprem.ci import GithubActions
from diagrams.onprem.vcs import Github

with Diagram(name="ningenme.net", filename="public/diagrams/diagram", show=False):
    user = User("Client")
    with Cluster("AWS"):
        with Cluster("route53"):
            route53NingenmeNet = Route53("ningenme.net")
            route53ApiNingenmeNet = Route53("api.ningenme.net")
            route53StaticNingenmeNet = Route53("static.ningenme.net")
        with Cluster("cloud front"):
            cloudFrontNingenmeNet = CloudFront("ningenme.net")
            cloudFrontApiNingenmeNet = CloudFront("api.ningenme.net")
            cloudFrontStaticNingenmeNet = CloudFront("static.ningenme.net")
        with Cluster("s3"):
            s3StaticNingenmeNet = S3("static.ningenme.net")
        with Cluster("alb"):
            albNetFront = ALB("net-front")
            albNetApi = ALB("net-api")
        with Cluster("ecs-cluster"):
            ecsNetFront = ECS("net-front")
            ecsNetApi = ECS("net-api")
        with Cluster("db"):
            # NOTE(review): truncated here in this excerpt.
# Excerpt of a serverless-application diagram. Only part of the original
# import block is visible; Diagram, Cluster and the node classes used
# below (TradicionalServer, Users, DirectConnect, Cognito, Route53, WAF,
# ACM, CloudFront, APIGateway, SecretsManager, Dynamodb, S3, Endpoint,
# RDS) are imported elsewhere in the original file.
from diagrams.aws.integration import Eventbridge, SNS, SQS, Appsync, StepFunctions
from diagrams.aws.analytics import Kinesis, Athena, Quicksight, ES, ElasticsearchService, Analytics, DataPipeline, Glue, EMR, KinesisDataAnalytics, KinesisDataFirehose, KinesisDataStreams, LakeFormation, Redshift, ManagedStreamingForKafka
from diagrams.aws.ml import Sagemaker, Comprehend, Rekognition, Forecast, Personalize, Polly, Textract, Transcribe, Translate, MachineLearning, Lex, SagemakerNotebook, SagemakerModel, SagemakerTrainingJob

with Diagram("Basic Serverless Application", show=False):
    with Cluster("Corporate Data Center - US"):
        pf = TradicionalServer("PingFederate")
        users = Users("Users")
    with Cluster("Account: account-1"):
        dc = DirectConnect("Direct Connect")
        with Cluster("Region: us-east-1"):
            cognito = Cognito("Cognito")
            rt53 = Route53("Route 53")
            waf = WAF("WAF")
            acm = ACM("TLS Certificate")
            cf = CloudFront("CloudFront")
            apig = APIGateway("REST API")
            sm = SecretsManager("Secrets Manager")
            ddb = Dynamodb("DynamoDB")
            s3 = S3("static web assets")
            with Cluster("VPC"):
                with Cluster("Availability Zone 1"):
                    with Cluster("Private Subnet 1"):
                        sm_vpce = Endpoint("VPC Endpoint")
                        ddb_vpce = Endpoint("VPC Endpoint")
                        # NOTE(review): excerpt ends here; the original
                        # continues with more AZ/subnet content.
                        rds_primary = RDS("Oracle RDS")
from diagrams import Diagram, Cluster, Edge
from diagrams.aws.network import Route53
from diagrams.onprem.database import Postgresql
from diagrams.gcp.network import LoadBalancing
from diagrams.onprem.monitoring import Prometheus, Grafana
from diagrams.onprem.client import Users, Client
from diagrams.k8s.compute import Pod
from diagrams.k8s.infra import Master
from diagrams.k8s.controlplane import API
from diagrams.k8s.network import Ingress, Service
from diagrams.onprem.network import Traefik
from diagrams.programming.language import Python

# Simplified architecture for the Orchestra workshop platform: wildcard
# DNS and a load balancer in front of a GKE-hosted app talking to Aurora
# PostgreSQL and the Kubernetes control API.
with Diagram("Simplified Orchestra Architecture Diagram", show=False):
    dns = Route53("Wildcard DNS\n*.bioc.cancerdatasci.org")
    lb = LoadBalancing("Load Balancer")
    pg = Postgresql("AWS Aurora PostgreSQL")
    users = Users("Workshop\nParticipants")
    web = Client("Standard Web Browser")
    with Cluster("Kubernetes Cluster"):
        app = Python("Orchestra")
        master = Master("Kubernetes Master\nRunning on GKE")
        k8api = API("Kubernetes Control API")
        # NOTE(review): `s` and `w` are unused within this excerpt —
        # presumably filled with Service/Pod nodes later in the original.
        s = []
        w = []
        ing = Traefik("Router & Proxy")
        ing >> app
        app >> pg
        app >> k8api
        k8api >> master
def build_arch_diag(self, account_dict, account_refs_dict):
    """Render a tiered architecture diagram for one AWS account.

    account_dict: scraped account inventory (keys seen here include
    'account_alias', 'account_id', 'dns', 'waf', 's3', 'az', 'regions',
    'vpc', 'igw', 'elb', 'ddb', 'rds').
    account_refs_dict: lookup used to resolve peer account IDs to aliases.

    Side effects: appends nodes to self.ingress, self.elb_subnets and
    self.ec2_instances while the Diagram context is open.

    NOTE(review): the indentation below was reconstructed from a
    whitespace-mangled source — confirm cluster nesting and the placement
    of the trailing wiring loops against the original file.
    """
    with Diagram(account_dict['account_alias'], show=True, direction="TB"):
        ## TIER 1: EXTERNAL TO AWS CLUSTERLESS
        ## Contains: Users, DNS, WAF, CloudFront, Cross Account VPC Peering (happens out of order)
        ## -------------------------------------------------------------------------------
        ## Users
        users = Users('Actors')
        self.ingress.append(users)
        ## DNS
        if account_dict['dns']['name'] == 'Route53':
            dns = Route53('Route53')
        else:
            dns = TradicionalServer('External DNS')
        self.ingress.append(dns)
        ## WAF — missing key means no CloudFront WAF configured.
        try:
            if account_dict['waf']['cloudfront']:
                waf = WAF('WAF - CloudFront')
        except KeyError:
            waf = None
        ## CloudFront
        if self.check_key(account_dict, 'cf'):
            cloudfront = CloudFront('CloudFront')
            self.ingress.append(cloudfront)
            cf_check = True
        ## CAVP
        cross_accounts = self.check_cross_account_refs(account_dict)

        ## TIER 2: ACCOUNT CLUSTER
        ## Contains: CAVP Accounts, Account ID, Account Alias, Global S3 Buckets
        ## -------------------------------------------------------------------------------
        ## CAVP Accounts
        for x_account in cross_accounts:
            ## Since x_account is the key value for the account dict -
            xaccount_dict = cross_accounts['{}'.format(x_account)]
            ## Next with Accepter Accounts/VPC's/Nodes
            try:
                if xaccount_dict['xacc_account']:
                    alias = self.get_account_refs(
                        xaccount_dict['xacc_account'], account_refs_dict)
                    with Cluster('Account: {}'.format(alias),
                                 graph_attr={
                                     'margin': '50',
                                     'bgcolor': 'white',
                                     'penwidth': '5'
                                 }):
                        ## Make the cross account VPC
                        with Cluster(xaccount_dict['xacc_vpc'],
                                     graph_attr={'bgcolor': 'white'}):
                            ## Create the accepter node and add it to the dictionary with the
                            ## Connection ID for diagram connection later.
                            xacc_node = VPCPeering(
                                'VPC Peering - Accepter')
                            cross_accounts['{}'.format(
                                x_account)]['xacc_node'] = xacc_node
            except KeyError:
                ## No accepter record — try the requester side instead.
                try:
                    if xaccount_dict['xreq_account']:
                        alias = self.get_account_refs(
                            xaccount_dict['xreq_account'], account_refs_dict)
                        with Cluster('Account: {}'.format(alias),
                                     graph_attr={
                                         'margin': '50',
                                         'bgcolor': 'white',
                                         'penwidth': '5'
                                     }):
                            ## Make the cross account VPC
                            with Cluster(xaccount_dict['xreq_vpc'],
                                         graph_attr={'bgcolor': 'white'}):
                                ## Create the Requester node and add it to the dictionary with the
                                ## Connection ID for diagram connection later.
                                xreq_node = VPCPeering(
                                    'VPC Peering - Requester')
                                cross_accounts['{}'.format(
                                    x_account)]['xreq_node'] = xreq_node
                except KeyError:
                    continue
        ## Account ID and Alias
        with Cluster('Account: {}'.format(account_dict['account_alias']),
                     graph_attr={
                         'margin': '150',
                         'bgcolor': 'white',
                         'penwidth': '5'
                     }):
            ## Global S3 Buckets
            s3_overflow = {}
            ## Diagram S3's in alternate Regions
            for s3 in account_dict['s3']:
                if s3['region'] == None:
                    s3_node = SimpleStorageServiceS3(s3['Name'])
                else:
                    ## If the S3 bucket isn't in one of our known AZ's,
                    ## append to a list for the new region
                    if s3['region'] in s3_overflow.keys():
                        s3_overflow[s3['region']].append(s3)
                    else:
                        ## If the S3 in a region that doesn't have an existing AZ,
                        ## create a new key for the region and put the nodes in it
                        for az in account_dict['az']:
                            if az['RegionName'] != s3['region']:
                                s3_overflow[s3['region']] = []
                                s3_overflow[s3['region']].append(s3)
            ## Create new cluster for each region in s3_overflow
            for region in account_dict['regions']:
                if region in s3_overflow.keys():
                    with Cluster(region,
                                 graph_attr={
                                     'bgcolor': 'white',
                                     'style': 'dashed',
                                     'pencolor': 'blue'
                                 }):
                        for s3 in s3_overflow[region]:
                            s3_node = SimpleStorageServiceS3(s3['Name'])

            ## TIER 3: VPC CLUSTER
            ## Contains: VPC, CAVP, VPC Peering, Internet Gateways, Load balancers, S3 buckets
            ## -------------------------------------------------------------------------------
            ## VPC
            for v in range(len(account_dict['vpc'])):
                ## Dynamically add contents of VPC Cluster
                with Cluster(account_dict['vpc'][v]['VpcId'],
                             graph_attr={'bgcolor': 'white'}):
                    ## CAVP and VPC Peering
                    account_dict['vpc'][v]['RequestNodes'] = []
                    account_dict['vpc'][v]['AcceptNodes'] = []
                    ## Create VPC Peering nodes, but connections can only be made once
                    ## all Requester and Accepter nodes have been created. Store the nodes
                    ## in a list for later reference, as dynamically created nodes can't be
                    ## referred to uniquely and will need to be iterated upon later.
                    for req in account_dict['vpc'][v]['PeerRequest']:
                        if req['RequesterVpcInfo'][
                                'OwnerId'] == account_dict['account_id']:
                            req_node = VPCPeering(
                                'VPC Peering - Requester')
                            cross_accounts[req['VpcPeeringConnectionId']][
                                'req_node'] = req_node
                    for acc in account_dict['vpc'][v]['PeerAccept']:
                        if acc['AccepterVpcInfo'][
                                'OwnerId'] == account_dict['account_id']:
                            acc_node = VPCPeering('VPC Peering - Accepter')
                            cross_accounts[acc['VpcPeeringConnectionId']][
                                'acc_node'] = acc_node
                    ## Internet Gateway
                    try:
                        ## Check if Internet Gateway exists
                        if account_dict['igw']:
                            for igw in account_dict['igw']:
                                for attach in igw['Attachments']:
                                    ## If the IGW is attached to the VPC and available
                                    ## add it to igws list, which we will attach the
                                    ## previous ingress point to
                                    if attach['VpcId'] == account_dict[
                                            'vpc'][v]['VpcId'] and attach[
                                                'State'] == 'attached':
                                        internet_gw = InternetGateway(
                                            igw['InternetGatewayId'])
                                        # NOTE(review): `igws` is never initialised in
                                        # this method — presumably defined at module or
                                        # instance level; verify in the original file.
                                        igws.append(internet_gw)
                                        ig_check = True
                            if len(igws) > 0:
                                ## Append igws list to ingress list as the next connection point
                                self.ingress.append(igws)
                    except KeyError:
                        ig_check = False
                    ## Load Balancer
                    try:
                        for elb in account_dict['elb']:
                            ## Check if a WAF is associated with the Load Balancer
                            #waf_check = check_key(elb)
                            elastic_lb = ElasticLoadBalancing(
                                '{} ({})'.format(elb['DNSName'], elb['Scheme']))
                            # Remember (node, subnet) pairs so ELBs can be wired to
                            # the EC2 instances in their subnets in TIER 6.
                            for az in elb['AvailabilityZones']:
                                elb_tuple = elastic_lb, az['SubnetId']
                                self.elb_subnets.append(elb_tuple)
                            if elb['Scheme'] == 'internet-facing' and elb[
                                    'State'] == 'active':
                                ## If the Load Balancer is internet-facing, a WAF should be put in place
                                self.ingress.append(elastic_lb)
                                elb_check = True
                    except KeyError:
                        elb_check = False

            ## TIER 4: REGION CLUSTER
            ## Contains: S3 buckets, WAF Regional, DynamoDB
            ## -------------------------------------------------------------------------------
            region_list = self.kill_dupes(account_dict['regions'])
            for r in range(len(region_list)):
                for a in range(len(account_dict['az'])):
                    if account_dict['az'][a][
                            'RegionName'] == region_list[r]:
                        with Cluster(region_list[r],
                                     graph_attr=self.region_sheet):
                            for s3 in account_dict['s3']:
                                if s3['region'] == region_list[r]:
                                    s3_node = SimpleStorageServiceS3(
                                        s3['Name'])
                            ## WAF Regional
                            ## PLACEHOLDER
                            ## DynamoDB — region is field 4 of the table ARN.
                            for ddb in account_dict['ddb']:
                                ddb_region = account_dict['ddb'][
                                    ddb]['TableArn'].split(':')[3]
                                if ddb_region == region_list[r]:
                                    ddb_node = Dynamodb(
                                        account_dict['ddb'][ddb]
                                        ['TableName'])
                            ## TIER 5: AVAILABILITY ZONE (AZ) CLUSTER
                            ## Contains: RDS
                            ## -------------------------------------------------------------------------------
                            with Cluster(account_dict['az'][a]['ZoneName'],
                                         graph_attr={
                                             'bgcolor': 'white',
                                             'style': 'dashed',
                                             'pencolor': 'orange'
                                         }):
                                ## Dynamically add RDS
                                for rds in account_dict['rds']:
                                    if rds['AvailabilityZone'] == account_dict[
                                            'az'][a]['ZoneName']:
                                        rds_node = RDS(rds['DBName'])
                                ## TIER 6: SUBNET CLUSTER
                                ## Contains: Security Groups, EC2 Instances
                                ## -------------------------------------------------------------------------------
                                for s in range(
                                        len(account_dict['az'][a]['Subnets'])):
                                    with Cluster(account_dict['az'][a]
                                                 ['Subnets'][s]['SubnetId']):
                                        for rezzie in account_dict['az'][a][
                                                'Subnets'][s]['ec2']:
                                            for instance in rezzie['Instances']:
                                                ec2_name = self.get_name_tag(
                                                    instance, 'EC2')
                                                ec2_instance = EC2(
                                                    '{} ({})'.format(
                                                        ec2_name,
                                                        instance['InstanceType']))
                                                self.ec2_instances.append(
                                                    ec2_instance)
                                                ## If there is a load balancer, and the load balancer connects
                                                ## to the subnet of the EC2 instance, connect the ELB to the
                                                ## EC2 instance in the diagram
                                                for elb_tuple in self.elb_subnets:
                                                    if account_dict['az'][a][
                                                            'Subnets'][s][
                                                                'SubnetId'] == elb_tuple[1]:
                                                        self.ingress[-1] >> elb_tuple[
                                                            0] >> ec2_instance
        ## Chain the collected ingress points together in order;
        ## the IndexError marks the end of the chain.
        for i in range(len(self.ingress)):
            try:
                self.ingress[i] >> self.ingress[i + 1]
            except IndexError:
                error = "Dead end, baby."  # value unused; loop sentinel only
        ## Wire requester -> accepter for each cross-account peering pair;
        ## pairs missing either node are skipped.
        for conn in cross_accounts:
            try:
                req_node = cross_accounts[conn]['xreq_node']
                acc_node = cross_accounts[conn]['acc_node']
                req_node >> acc_node
            except KeyError:
                continue
# diagram.py
# Requires the `diagrams` package (pip) and graphviz installed locally.
from diagrams import Cluster, Diagram
from diagrams.aws.network import Route53
from diagrams.aws.security import ACM

# Hosted zone -> record -> ACM certificate, joined with undirected edges.
with Diagram("SSL Cert", show=False):
    with Cluster('DNS'):
        zone = Route53("Zone")
        record = Route53("Record")
        cert = ACM("Certificate Manager")
        zone - record - cert
# Excerpt: imports plus the start of an "Architecture" diagram for an
# rshiny app on ECS. NOTE(review): Diagram, Cluster and ECS are imported
# elsewhere in the original file, and the source is truncated inside the
# "RDS Cluster" cluster.
from diagrams.aws.database import RDS
from diagrams.aws.network import ELB
from diagrams.aws.network import Route53
from diagrams.aws.management import Cloudwatch
from diagrams.aws.general import InternetGateway
from diagrams.aws.security import CertificateManager, IAM
from diagrams.aws.general import Users
from diagrams.aws.storage import S3

with Diagram("Architecture", show=False):
    cw = Cloudwatch("Cloudwatch")
    s3 = S3("S3")
    users = Users("Users")
    iam = IAM("IAM")
    dns = Route53("DNS")
    cm = CertificateManager("CertificateManager")
    listener = InternetGateway(":443")
    with Cluster("VPC: 172.16.x.x"):
        lb = ELB("ALB")
        with Cluster("ECS"):
            with Cluster("Public Subnet #1"):
                ecs_stack_1 = ECS("rshiny-app-1")
            with Cluster("Public Subnet #2"):
                ecs_stack_2 = ECS("rshiny-app-2")
            with Cluster("Private Subnet #1"):
                with Cluster("RDS Cluster"):
                    # NOTE(review): truncated here in this excerpt.
# Excerpt: serverless static-website diagram. NOTE(review): Diagram,
# Cluster, Edge and the node classes used below (Users, Cognito,
# APIGateway, Route53, DDB, SES, WAF, IAM, S3, CF, Lambda) are imported
# elsewhere in the original file.
from diagrams.aws.management import Cloudwatch

# Graphviz graph-level attributes for the rendered diagram.
graph_attr = {"fontsize": "40"}

with Diagram("Static Website on Amazon S3 ", filename="diagram", show=False, graph_attr=graph_attr):
    users = Users()
    with Cluster("AWS"):
        security = Cognito("Cognito")
        gateway = APIGateway("Gateway")
        route = Route53("Route53")
        db = DDB("DynamoDB")
        email_service = SES("SES")
        monitoring = Cloudwatch("AWS CloudWatch ")
        firewall = WAF("AWS WAF")
        identity = IAM("AWS IAM")
        with Cluster("CDN"):
            # Chained edge: the S3 origin feeds the CloudFront distribution.
            cdn = S3("S3") >> CF("CloudFront CDN")
        with Cluster("Functions") as xyz:
            func_send_mail = Lambda("Send Email")
            func_store_data = Lambda("Store Data")
            functions = [func_send_mail, func_store_data]
        # Bidirectional edge between the gateway and both Lambdas.
        gateway >> Edge() << functions
from diagrams import Diagram, Cluster, Edge
from diagrams.aws.network import ELB, Route53
from diagrams.aws.security import CertificateManager
from diagrams.aws.storage import S3
from diagrams.generic.blank import Blank

# Load-balancer topology: endpoint -> LB -> listener -> target group,
# with access logs shipped to S3. "(*)" labels mark 0..n multiplicity,
# keyed by the Blank legend node at the bottom.
with Diagram("AWS Load Balancer", show=False, direction="TB"):
    logging = S3("logging bucket")
    endpoint = Route53("endpoint")
    tls = CertificateManager("tls certificate")
    with Cluster("vpc"):
        lb = ELB("load balancer")
        # Dangling labelled edge into the LB (no source node).
        lb << Edge(label="tls certificate")
        listener = ELB("listener (*)")
        listener << tls
        # Undirected chain: LB — listener — target group.
        lb - listener - ELB("target group (*)")
    endpoint >> lb >> Edge(label="access logs") >> logging
    Blank("* 0..n")  # legend for the "(*)" multiplicity markers
# Excerpt of an EKS-cluster diagram. NOTE(review): truncated inside the
# private-subnet list, and several names used below (Diagram, Cluster,
# PublicSubnet, PrivateSubnet) come from imports outside this excerpt.
from diagrams.aws.network import ELB
from diagrams.aws.network import NATGateway
from diagrams.aws.security import ACM
from diagrams.aws.network import Route53
from diagrams.aws.compute import AutoScaling
from diagrams.aws.compute import EC2
from diagrams.k8s.network import Ingress
from diagrams.k8s.network import Service
from diagrams.k8s.compute import Pod
from diagrams.onprem.iac import Terraform
from diagrams.onprem.ci import GitlabCI
from diagrams.aws.storage import S3

with Diagram("EKS Cluster", show=False, direction="LR"):
    ssl_certificate = ACM("SSL cert")
    dns_name = Route53("DNS domain")
    load_balancer = ELB("Load balancer")
    with Cluster("Custom VPC"):
        with Cluster("Public network"):
            public_subnets = [
                PublicSubnet("Subnet zone a"),
                PublicSubnet("Subnet zone b"),
                PublicSubnet("Subnet zone c"),
                PublicSubnet("Subnet zone d"),
            ]
            nat_gateway = NATGateway("NAT gateway")
        with Cluster("Private network"):
            private_subnets = [
                PrivateSubnet("Subnet zone a"),
                PrivateSubnet("Subnet zone b"),
                PrivateSubnet("Subnet zone c"),
# Per-branch static-site stacks for NateGramer.com: each git branch gets a
# Route53 record, CloudFront distribution and S3 bucket provisioned via
# CloudFormation. NOTE(review): truncated at the "Master Branch Stack"
# cluster in this excerpt.
from diagrams import Cluster, Diagram
from diagrams.aws.compute import ECS
from diagrams.aws.network import Route53, CF
from diagrams.aws.storage import S3
from diagrams.aws.management import Cloudformation
from diagrams.onprem.vcs import Github
from diagrams.onprem.network import Internet

with Diagram("NateGramer.com - S3 Backed Public Website", show=False):
    with Cluster("Feature Branch Stack"):
        featureDns = Route53("<branch>.NateGramer.com")
        featureCloudfront = CF("CloudFront Distribution")
        featureBucket = S3("Site Storage")
        featureStack = [featureDns, featureCloudfront, featureBucket]
        featureDns >> featureCloudfront >> featureBucket
    Github("Pull in any feature branch") >> Cloudformation(
        "Branch Stack") >> featureStack
    Internet() >> featureDns
    with Cluster("Dev Branch Stack"):
        devDns = Route53("dev.NateGramer.com")
        devCloudfront = CF("CloudFront Distribution")
        devBucket = S3("Site Storage")
        devStack = [devDns, devCloudfront, devBucket]
        devDns >> devCloudfront >> devBucket
    Github("Push in Dev Branch") >> Cloudformation("Branch Stack") >> devStack
    Internet() >> devDns
    with Cluster("Master Branch Stack"):
        # NOTE(review): truncated here in this excerpt.
#!/usr/bin/env python
# coding: utf-8

# In[1]:

from diagrams import Cluster, Diagram
from diagrams.aws.compute import ECS
from diagrams.aws.database import Elasticache, RDS
from diagrams.aws.network import ELB, Route53

# In[2]:

# Clustered web services: DNS -> LB -> three ECS tasks, backed by a
# primary RDS with a read replica plus a memcached tier.
with Diagram('Clustered Web Services', show=False):
    dns = Route53('dns')
    lb = ELB('lb')

    with Cluster('Services'):
        svc_group = [ECS('web1'), ECS('web2'), ECS('web3')]

    with Cluster('DB Cluster'):
        db_master = RDS('userdb')
        # Bug fix: was `db_master = [RDS('userdb ro')]`, which REBOUND
        # db_master to the replica list instead of drawing the undirected
        # master-replica edge (so svc_group below pointed at the replica,
        # not the master).
        db_master - [RDS('userdb ro')]

    memcached = Elasticache('memcached')

    dns >> lb >> svc_group
    svc_group >> db_master
    # Bug fix: `memchaced` was a NameError typo for `memcached`.
    svc_group >> memcached

# In[ ]:
# Excerpt: event-driven registration/notification architecture (labels in
# French). NOTE(review): Diagram, Cluster, Lambda, Route53, S3 and
# APIGateway are imported elsewhere in the original file, and the source
# is truncated inside the BackEnd_2 list.
from diagrams.aws.integration import Eventbridge
from diagrams.programming.language import Python, Javascript
from diagrams.aws.database import DDB
from diagrams.aws.engagement import SES
from diagrams.aws.ml import Textract

with Diagram("architecture", show=False):
    # Scheduled flow: EventBridge rule triggering the scan Lambda.
    with Cluster(" Envoie_de_la notification "):
        flux_2 = Eventbridge(
            "planifie l'execution \nde la fonction\nscan_user du lundi \nau vendredi à 8H00"
        ) >> Lambda("Handler:lambdascan_handeler")
    #with Cluster("Enregistrement_de_l'utilisateur "):
    # Registration flow: DNS -> S3 static site -> API Gateway -> Lambda.
    flux_1 = Route53(
        "redirige le trafic \n de mtchou-mouh.mongulu.cm \n vers le bucket S3 de meme nom"
    ) >> S3(
        "3 PAGES WEB :\n-index.html\n-demo.html\n-error.html"
    ) >> APIGateway(
        "-OPTIONS:résout\nproblèmeCORS\n-POST:donne les\n informations à LAMBDA"
    ) >> Lambda(
        "Handler:lambda.register_handler\n-Enregistrement information dans\n DynamoDB table Register\n-Envoie mail de verification \nvia AmazoneSES"
    )
    with Cluster("Programming"):
        languages = [Python("BackEnd"), Javascript("FrontEnd")]
    with Cluster("BackEnd_1"):
        Backend_1 = [DDB("DataBase"), SES("verification")]
    with Cluster("BackEnd_2"):
        # NOTE(review): truncated here in this excerpt.
        Backend_2 = [
from diagrams import Cluster, Diagram
from diagrams.aws.compute import ECS
from diagrams.aws.database import ElastiCache, RDS
from diagrams.aws.network import ELB
from diagrams.aws.network import Route53

# Classic clustered web service: DNS -> LB -> ECS fleet, backed by a
# primary RDS (with read replica) and memcached. Writes out/aws-001.svg.
with Diagram("Clustered Web Services", filename="out/aws-001", outformat="svg", show=False):
    dns = Route53("dns")
    lb = ELB("lb")
    with Cluster("Services"):
        svc_group = [ECS("web1"), ECS("web2"), ECS("web3")]
    with Cluster("DB Cluster"):
        db_master = RDS("userdb")
        # Undirected edge: read replica hangs off the master.
        db_master - [RDS("userdb ro")]
    memcached = ElastiCache("memcached")
    dns >> lb >> svc_group
    svc_group >> db_master
    svc_group >> memcached
# Infrastructure diagram for Smart-Foodies-Shop: DNS -> LB -> ASG scaling
# a VPC of frontend/Redis/backend services with a custom Kafka node.
# NOTE(review): `urlretrieve` is unused in this excerpt — possibly used
# elsewhere to fetch the Kafka icon; verify.
from urllib.request import urlretrieve

from diagrams import Cluster, Diagram
from diagrams.custom import Custom
from diagrams.aws.compute import ECS, AutoScaling, EC2
from diagrams.aws.database import ElastiCache
from diagrams.aws.network import ELB, Route53

# Local icon file used for the Custom Kafka node.
kafka_icon = "assets/kafka.png"

with Diagram("Infrastructure architecture", show=False):
    dns = Route53("Smart-Foodies-Shop.com")
    lb = ELB("Load Balancer")
    scaler = AutoScaling("Auto Scaling Group")
    with Cluster("VPC"):
        svc_group = [ECS("Frontend"), EC2("Redis"), EC2("Backend")]
        queue = Custom("Kafka", kafka_icon)
        db = EC2("MySQL")
    dns >> lb >> scaler
    scaler >> svc_group
    svc_group >> db
    svc_group >> queue