from diagrams import Cluster, Diagram, Edge
from diagrams.aws.compute import Batch, EC2ContainerRegistry, Lambda
from diagrams.aws.database import Dynamodb
from diagrams.aws.integration import SQS
from diagrams.aws.integration import StepFunctions as SFn_TASK  # assumed alias; the snippet uses SFn_TASK
from diagrams.aws.management import Cloudwatch
from diagrams.aws.network import APIGateway
from diagrams.aws.storage import S3


def batch_stepfunctions():
    stack_objective = "batch-stepfunctions"

    with Diagram(stack_objective, outformat="png",
                 filename=f"{stack_objective}/pics/arch", show=False):
        with Cluster("StepFunctions"):
            batch = Batch("AWS Batch")

        Cloudwatch("CloudWatch Event") >> Edge(label="cron") \
            >> batch << Edge(label="image") << EC2ContainerRegistry("ECR")
        batch >> Edge(label="access through IAM Role") >> S3("S3")
def apigw_dynamodb_sfn_with_heavytask():
    stack_objective = "apigw-dynamodb-sfn-with-heavytask"

    with Diagram(stack_objective, outformat="png",
                 filename=f"{stack_objective}/pics/arch", show=False):
        sqs = SQS("SQS")
        apigw = APIGateway("/task") >> Lambda("integration") >> [
            sqs,
            Dynamodb("DynamoDB"),
        ]
        timer_lambda = Lambda("timer lambda")
        sqs << Edge(label="dequeue") << timer_lambda << Cloudwatch("cron")

        with Cluster(label="StepFunctions", direction="TB"):
            sfn_start = SFn_TASK("update DynamoDB\nset `running`")
            sfn_start \
                >> Lambda("Some Task") \
                >> [SFn_TASK("update DynamoDB\nset `success`"),
                    SFn_TASK("update DynamoDB\nset `failure`")]

        # invoke sfn from Lambda
        timer_lambda >> sfn_start
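Both definitions above only build a graph when called; nothing renders at import time. A minimal usage sketch, assuming the two functions live in the same module and the output directories (`batch-stepfunctions/pics/`, `apigw-dynamodb-sfn-with-heavytask/pics/`) already exist:

if __name__ == "__main__":
    # Each call writes <stack_objective>/pics/arch.png via Graphviz.
    batch_stepfunctions()
    apigw_dynamodb_sfn_with_heavytask()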
with Diagram("Static Website on Amazon S3 ", filename="diagram", show=False, graph_attr=graph_attr): users = Users() with Cluster("AWS"): security = Cognito("Cognito") gateway = APIGateway("Gateway") route = Route53("Route53") db = DDB("DynamoDB") email_service = SES("SES") monitoring = Cloudwatch("AWS CloudWatch ") firewall = WAF("AWS WAF") identity = IAM("AWS IAM") with Cluster("CDN"): cdn = S3("S3") >> CF("CloudFront CDN") with Cluster("Functions") as xyz: func_send_mail = Lambda("Send Email") func_store_data = Lambda("Store Data") functions = [func_send_mail, func_store_data] gateway >> Edge() << functions functions >> Edge() << identity func_send_mail >> Edge() >> email_service >> users
with Cluster('Amazon Web Services'):
    with Cluster('Virtual Private Cloud'):
        kube = EKS('Elastic Kubernetes\nService')
        instance_1 = EC2('EC2 Instance 1\n(m5.xlarge)')
        disk_1 = EBS('EBS Disk 1\n(gp2)')
        instance_2 = EC2('EC2 Instance 2\n(m5.xlarge)')
        disk_2 = EBS('EBS Disk 2\n(gp2)')

        instance_1 - disk_1
        instance_2 - disk_2
        kube - instance_1
        kube - instance_2
        kube - RDS('Amazon RDS\nfor PostgreSQL\n(db.t3.large)')

    kube - S3('S3')
    kube - Cloudwatch('Amazon CloudWatch')

dns = Route53('Route53')
lb = ELB('Elastic Load Balancer')
lb >> kube
dns >> lb
[Client('SDKs'), Users('Users')] >> dns

with Diagram('Google Cloud Platform resources', show=False, filename='gke', outformat='png'):
    with Cluster('Google Cloud Platform'):
        with Cluster('Virtual Private Cloud'):
            kube = GKE('Google Kubernetes\nEngine')
            instance_1 = ComputeEngine('VM Instance 1\n(n1-standard-4)')
promotions_cms >> promotions_gw >> promotions_be

with Cluster("In Store"):
    printer_fe = S3("Docket Printer Front-end")
    docket_printer = Server("Docket Printer")
    cashier = User("Cashier")
    store_fe = S3("Store Front-end")
    store_api = APIGateway("Store API")
    store_be = Lambda("Store BE")

    cashier >> Edge(label="Retrieve printed docket") >> docket_printer
    printer_fe >> docket_printer
    store_db = S3("Store DB")
    cashier >> store_fe >> store_api >> store_be >> store_db

with Cluster("Alerting"):
    logs = Cloudwatch("CloudWatch")
    alerts = SNS("SNS")
    logs >> alerts

with Cluster("Ordering"):
    customer_fe = S3("Customer Front-end")
    customer_api = APIGateway("Customer API")
    customer_be = Lambda("Customer BE")
    customer_fe >> customer_api >> customer_be

customer_fe >> Edge(label="Redirect for payment") >> payment_gw
payment_gw >> Edge(label="Confirm payment status") >> customer_api
customer_be >> Edge(label="Payment failures") >> logs
promotions_be >> Edge(label="Promotion publication failures") >> logs
promotions_be >> customer_api
marketing >> Edge(label="Maintain promotions") >> promotions_cms
PrivateSubnet("Private Subnet") with Cluster("Loadbalancing"): loadbalancer = ELB("Loadbalancer\nEndpoint") [maindomain, secondarydomain] >> loadbalancer with Cluster("ECS Cluster"): clusterecs = ECS("Webservices-Prod") autoscalingclusterecs = AutoScaling("Cluster Scaling") ec2 = EC2("EC2 Instances") alarmscluster = Cloudwatch("Cluster Reserved CPU Alarm") clusterecs >> alarmscluster >> autoscalingclusterecs >> ec2 with Cluster("Webservices Service"): webservices = EC2("Webservices Tasks") autoscalingwebservices = AutoScaling( "Webservices docker scaling") alarmswebservices = Cloudwatch("Service CPU Alarm") loadbalancer >> webservices >> ecr webservices >> alarmswebservices >> autoscalingwebservices
from diagrams import Cluster, Diagram, Edge
from diagrams.aws.compute import EC2
from diagrams.aws.compute import AutoScaling
from diagrams.aws.database import RDS
from diagrams.aws.network import ELB
from diagrams.aws.management import Cloudwatch
from diagrams.aws.storage import S3

with Diagram("AIK Architecture Diagram Solution", show=False):
    bucket = S3("Bucket x3")

    vpc = Cluster("VPC")
    with vpc:
        loadBal = ELB("ELB")
        jenkins = EC2("Jenkins CI/CD Server")

        with Cluster("EC2 Instances - AutoScaling Group"):
            ec2_1 = EC2("AIK App")
            ec2_2 = EC2("AIK App")

        with Cluster("DB RDS MySQL"):
            rds = RDS("")

        rds << Edge(label="") >> ec2_1
        rds << Edge(label="") >> ec2_2
        loadBal >> ec2_1
        loadBal >> ec2_2

    ec2_1 >> bucket
    ec2_2 >> bucket
    ec2_1 << Cloudwatch("CloudWatch")
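One pattern in the snippet above worth noting: `Cluster` is a plain context manager, so it can be created ahead of time and entered later, as `vpc` is. A minimal sketch of the two equivalent spellings (the diagram name is arbitrary):

from diagrams import Cluster, Diagram

with Diagram("cluster-pattern", show=False):
    vpc = Cluster("VPC")   # create first...
    with vpc:              # ...then enter; equivalent to the form below
        pass

    with Cluster("VPC") as vpc2:  # create and enter in one statement
        pass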
"fontname": "Helvetica", "style": "rounded", "bgcolor": "transparent" } cluster = { "fontsize": "16", "fontname": "Helvetica", "style": "rounded", "bgcolor": "transparent" } with Diagram("Codebuild", graph_attr=graph_attr, direction="LR"): with Cluster("Code change", graph_attr=major_cluster): with Cluster("Trigger", graph_attr=cluster): Trigger = Cloudwatch("Event Trigger") IAMRole("Trigger Role") >> Trigger Cloudwatch = Codecommit("Code Change") >> Edge( color="firebrick") >> Cloudwatch("Event Rule") >> Trigger with Cluster("Build", graph_attr=cluster): Build = Codebuild("Codebuild") IAMRole("Codebuild Role") >> Build Build << Edge(color="black") >> ParameterStore("Build No") Build << Edge(color="black") >> ParameterStore("Latest") Build >> Edge(color="darkgreen") >> S3("Artefact Store") Trigger >> Edge(color="firebrick") >> Build
from diagrams import Diagram
from diagrams.aws.compute import Lambda
from diagrams.aws.management import Cloudwatch
from diagrams.saas.chat import Slack

# Batch job that archives Slack channels with zero members
with Diagram('Zero-member channel archive batch'):
    Cloudwatch('CloudWatch Event\nruns weekly') >> Lambda('Lambda') >> Slack('Slack')
from diagrams import Diagram, Cluster
from diagrams.aws.management import Cloudwatch

with Diagram("AWS CloudWatch Event Rule", show=False, direction="TB"):
    with Cluster("cloudwatch event rule"):
        source = Cloudwatch("event source")
        target = Cloudwatch("event target")
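As written, the rule's source and target render as two unconnected icons inside the cluster. If an edge is wanted, it has to be drawn while the `Diagram` context is still open; a hedged sketch (the edge and its label are illustrative, not part of the original):

from diagrams import Diagram, Cluster, Edge
from diagrams.aws.management import Cloudwatch

with Diagram("AWS CloudWatch Event Rule", show=False, direction="TB"):
    with Cluster("cloudwatch event rule"):
        source = Cloudwatch("event source")
        target = Cloudwatch("event target")
        source >> Edge(label="routes events") >> target  # illustrative edge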
from diagrams import Diagram, Cluster
from diagrams.onprem.container import Docker
from diagrams.generic.network import Firewall
from diagrams.aws.management import Cloudwatch, ParameterStore
from diagrams.aws.storage import EFS
from diagrams.aws.compute import ECS

with Diagram("AWS ECS Task Definition", show=False, direction="TB"):
    with Cluster("task definition"):
        image = Docker("image")
        ports = Firewall("ports")
        logging = Cloudwatch("logging")
        environment = ECS("environment")
        secrets = ParameterStore("secrets")
        health_check = ECS("health check")
        mount_points = EFS("mount points")
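The nodes above are deliberately left unconnected; the cluster alone conveys that they belong to one task definition. If fan-out edges were wanted instead, either side of an operator accepts a list; a minimal sketch (the topology is illustrative, not part of the original):

from diagrams import Diagram, Cluster
from diagrams.onprem.container import Docker
from diagrams.generic.network import Firewall
from diagrams.aws.management import Cloudwatch

with Diagram("task definition (fan-out)", show=False, direction="TB"):
    with Cluster("task definition"):
        image = Docker("image")
        # One statement draws an edge from `image` to every node in the list.
        image >> [Firewall("ports"), Cloudwatch("logging")]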
from diagrams import Cluster, Diagram, Edge
from diagrams.aws.compute import EC2
from diagrams.aws.storage import S3
from diagrams.aws.database import Neptune
from diagrams.onprem.client import Client
from diagrams.aws.management import Cloudwatch

graph_attr = {
    "fontsize": "30"
}

with Diagram("Neptune Database / AWS US-East-1", filename="diagram", show=False, graph_attr=graph_attr):
    with Cluster("AWS"):
        rdf4jWorkbench = EC2("rdf4j-workbench")
        s3Bucket = S3("data load bucket")
        cloudWatch = Cloudwatch("CloudWatch")

        with Cluster("Neptune"):
            neptuneDb = Neptune("cluster")
            neptuneDb - [Neptune("Writer"), Neptune("Reader")]

        rdf4jWorkbench >> neptuneDb
        rdf4jWorkbench << neptuneDb
        cloudWatch << neptuneDb
        s3Bucket >> neptuneDb

    Client("web ui") >> Edge(color="darkblue", style="dotted") << rdf4jWorkbench >> Edge()
from diagrams import Cluster, Diagram
from diagrams.aws.iot import InternetOfThings, IotCore, IotEvents
from diagrams.aws.compute import Lambda
from diagrams.aws.storage import S3
from diagrams.aws.database import Dynamodb
from diagrams.aws.integration import SQS, SNS
from diagrams.aws.management import Cloudwatch
from diagrams.aws.ml import Rekognition

with Diagram("IOT Diagram", show=True, direction="TB"):
    _iotoutside = InternetOfThings("ESP-32 Board")

    with Cluster("AWS Serverless IOT"):
        _iotcore = IotCore("ESP-32 Iot Core")
        _iotevent = IotEvents("Event trigger SQS")
        _sqsesp32 = SQS("SQS Queue ESP32")
        _logsesp32 = Cloudwatch("Log Operations")
        _eventtriggeresp32 = Cloudwatch("Event Trigger to LAMBDA")
        _lambdaprocessimages = Lambda("Lambda process images")
        _imgrekog = Rekognition("Rekognition process")
        _tabledynamo = Dynamodb("Table history")
        _s3bucket = S3("S3 bucket images converted")
        _snstopico = SNS("Alert cat found")

        _iotoutside >> _iotcore >> _iotevent >> _sqsesp32
        _iotcore >> _logsesp32
        _eventtriggeresp32 >> _lambdaprocessimages >> _sqsesp32
        _lambdaprocessimages >> _imgrekog
        _imgrekog >> _lambdaprocessimages
        _lambdaprocessimages >> _tabledynamo
        _lambdaprocessimages >> _s3bucket
        _lambdaprocessimages >> _snstopico
# diagram.py
from diagrams import Cluster, Diagram, Edge
from diagrams.aws.devtools import Codecommit
from diagrams.aws.integration import SNS
from diagrams.aws.management import Cloudwatch

with Diagram("AWS Codecommit", show=False):
    with Cluster("Module"):
        Codecommit("Code Repository") >> Cloudwatch("CW Change Trigger") >> SNS("SNS Fan Out")
# diagram.py
from diagrams import Cluster, Diagram
from diagrams.aws.security import Inspector
from diagrams.aws.management import Cloudwatch

with Diagram("Inspector", show=False):
    Cloudwatch("Event") >> Inspector("Inspector")
from diagrams import Diagram, Cluster
from diagrams.aws.storage import S3
from diagrams.aws.compute import Lambda
from diagrams.aws.integration import SNS, Eventbridge
from diagrams.aws.management import Cloudwatch
from diagrams.onprem.queue import ActiveMQ

with Diagram("Alerting Workflow", show=True):
    with Cluster('main account'):
        topic = SNS('SNS Topic')

        with Cluster('Lambda'):
            l = Lambda('processor')
        topic >> l
        S3('lambda source') - l

        cl = Cloudwatch('Cloudwatch')
        l >> cl
        event = Eventbridge('Cloudwatch\nevent rule')
        cl >> event

    with Cluster('Event Bus'):
        event_bus = ActiveMQ('bus')
    event >> event_bus
thanos = Thanos("\nThanos")
prometheus - thanos

with Cluster("Storage", graph_attr=graph_attr):
    with Cluster("Logs", graph_attr=graph_attr):
        elasticsearch = Elasticsearch("\nElasticsearch")
        solr = Solr("\nSolr")
        mongodb = Mongodb("\nMongoDB")
        elasticsearch - solr - mongodb

    with Cluster("Metrics", graph_attr=graph_attr):
        influx = Influxdb("\nInfluxDB")
        prometheus2 = Prometheus("\nPrometheus")
        prometheus2 - influx

loki >> elasticsearch
thanos >> prometheus2

with Cluster("Visualization", graph_attr=graph_attr):
    kibana = Kibana("\nKibana")
    grafana = Grafana("\nGrafana")

influx >> kibana
mongodb >> grafana

with Cluster("Cloud", graph_attr=graph_attr):
    with Cluster("Azure", graph_attr=graph_attr):
        azlog = LogAnalyticsWorkspaces("\nLog Analytics")
    with Cluster("AWS", graph_attr=graph_attr):
        awslog = Cloudwatch("\nCloudwatch")

grafana >> Edge(color="white") >> azlog
from diagrams import Cluster, Diagram, Edge
from diagrams.aws.compute import EC2
from diagrams.aws.management import Cloudwatch
from diagrams.aws.network import InternetGateway, RouteTable, VPCRouter
from diagrams.aws.security import KMS, IAMRole
from diagrams.generic.network import Firewall
from diagrams.onprem.network import Internet

graph_attr = {
    "pad": "0",
    "bgcolor": "transparent"
}

with Diagram("template-08", show=False, direction="LR", filename="diagram-08", graph_attr=graph_attr):
    internet = Internet("Public Internet")

    with Cluster("Vpc 10.0.0.0/16"):
        internet_gateway = InternetGateway("Igw")
        internet - internet_gateway

        routeTable = RouteTable("RouteTable")
        routeTable >> Edge(label="0.0.0.0/0", style="dashed") >> internet_gateway

        with Cluster("Subnet 10.0.0.0/24"):
            router = VPCRouter("Router\n10.0.0.1")
            router - Edge(style="dashed") - routeTable
            router - internet_gateway

            ec2 = EC2("ec2\n10.0.0.x")
            ec2 - Edge(style="dashed") - router
            sg = Firewall("SG: 22/tcp")
            ec2 - sg - router
            KMS("KeyPair") - ec2
            ec2Role = IAMRole("Ec2InstanceRole") - ec2

    cw = Cloudwatch("CloudWatch")
    cw - internet
Blank("") # with Cluster("Private Subnet (2)") as priv2: tableau = Tableau("Tableau Server\n(EC2)") with Cluster("S3 Data Lake"): s3data = storage.S3("Data Bucket") s3meta = storage.S3("Metadata Bucket") s3logs = storage.S3("Logging Bucket") sftp = TransferForSftp("SFTP\nTransfer Service") py_fn1 = compute.Lambda("File Listener\n(Lambda Python)") glue = Glue("Spark Transforms\n(Glue)") # with Cluster("AWS Serverless"): events = Eventbridge("Event Triggers\n(AWS Eventbridge)") secrets = security.SecretsManager("AWS Secrets\nManager") cw = Cloudwatch("Cloudwatch Logs") source = Internet("External\nData Source") py_fn1 << s3data << py_fn1 glue << s3data << glue nat << singer1 nat >> source elb >> tableau s3meta >> singer1 >> s3data singer1 << secrets singer1 << events rs1 << singer1 users >> elb source >> singer1