def linking_nodes_diagram():
    from diagrams import Diagram, Cluster
    from diagrams.aws.compute import EC2
    from diagrams.aws.network import ELB
    from diagrams.aws.network import Route53
    from diagrams.onprem.database import PostgreSQL  # Would typically use RDS from aws.database
    from diagrams.onprem.inmemory import Redis  # Would typically use ElastiCache from aws.database

    with Diagram("Simple Website Diagram", direction='LR') as diag4:  # It's LR by default, but you have a few options with the orientation
        dns = Route53("DNS")
        load_balancer = ELB("Load Balancer")
        database = PostgreSQL("User Database")
        cache = Redis("Cache")

        with Cluster("Webserver Cluster"):
            svc_group = [EC2("Webserver 1"),
                         EC2("Webserver 2"),
                         EC2("Webserver 3")]

        dns >> load_balancer >> svc_group
        svc_group >> cache
        svc_group >> database
    print(diag4)  # This will illustrate the diagram if you are using a Google Colab or Jupyter notebook.
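# A minimal sketch of the orientation options mentioned in the comment above:
# Diagram's direction argument accepts "TB", "BT", "LR" or "RL" (top-to-bottom,
# bottom-to-top, left-to-right, right-to-left). The function name, filenames and
# node labels here are illustrative, not taken from the snippet above.
def direction_options_sketch():
    from diagrams import Diagram
    from diagrams.aws.compute import EC2

    for direction in ("TB", "BT", "LR", "RL"):
        with Diagram(f"Orientation {direction}", direction=direction,
                     filename=f"orientation_{direction.lower()}", show=False):
            EC2("Webserver 1") >> EC2("Webserver 2") >> EC2("Webserver 3")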
def nodes_diagram():
    with Diagram("Simple Website Diagram") as diag2:
        dns = Route53("dns")
        load_balancer = ELB("Load Balancer")
        database = PostgreSQL("User Database")
        cache = Redis("Cache")
        svc_group = [EC2("Webserver 1"),
                     EC2("Webserver 2"),
                     EC2("Webserver 3")]
    print(diag2)  # This will illustrate the diagram if you are using a Google Colab or Jupyter notebook.
def generate_overview_diagram():
    from diagrams import Diagram, Cluster
    from diagrams.onprem.inmemory import Redis
    from diagrams.onprem.monitoring import Grafana, Prometheus
    from diagrams.programming.framework import Angular
    from diagrams.programming.language import Python

    graph_attr = {
        "bgcolor": "transparent"
    }

    with Diagram("Overview", show=False, filename="bin/overview", graph_attr=graph_attr):
        with Cluster("Client"):
            webapp = Angular("webapp")

        with Cluster("API Services"):
            status_service = Python("status_service")
            task_service = Python("task_service")
            worker_service = Python("worker_service")
            metrics_service = Python("metrics_service")

        with Cluster("Intermediate Services"):
            with Cluster("Clearly Client Cluster"):
                clearly_client = Python("clearly_client")

        with Cluster("Backend Services"):
            with Cluster("Clearly Server Cluster"):
                clearly_server = Python("clearly_server")

        with Cluster("External Connections"):
            with Cluster("Message Broker"):
                redis = Redis("redis")
            with Cluster("Monitoring"):
                grafana = Grafana("grafana")
                prometheus = Prometheus("prometheus")

        webapp >> status_service << clearly_client
        webapp >> task_service << clearly_client
        webapp >> worker_service << clearly_client
        clearly_client >> clearly_server >> redis
        metrics_service << prometheus
        metrics_service >> clearly_server
def linking_nodes_diagram():
    with Diagram("Simple Website Diagram", direction='LR') as diag4:  # It's LR by default, but you have a few options with the orientation
        dns = Route53("DNS")
        load_balancer = ELB("Load Balancer")
        database = PostgreSQL("User Database")
        cache = Redis("Cache")

        with Cluster("Webserver Cluster"):
            svc_group = [EC2("Webserver 1"),
                         EC2("Webserver 2"),
                         EC2("Webserver 3")]

        dns >> load_balancer >> svc_group
        svc_group >> cache
        svc_group >> database
    print(diag4)  # This will illustrate the diagram if you are using a Google Colab or Jupyter notebook.
def nodes_diagram():
    from diagrams import Diagram, Cluster
    from diagrams.aws.compute import EC2
    from diagrams.aws.network import ELB
    from diagrams.aws.network import Route53
    from diagrams.onprem.database import PostgreSQL  # Would typically use RDS from aws.database
    from diagrams.onprem.inmemory import Redis  # Would typically use ElastiCache from aws.database

    with Diagram("Simple Website Diagram") as diag2:
        dns = Route53("dns")
        load_balancer = ELB("Load Balancer")
        database = PostgreSQL("User Database")
        cache = Redis("Cache")
        svc_group = [EC2("Webserver 1"),
                     EC2("Webserver 2"),
                     EC2("Webserver 3")]
    print(diag2)  # This will illustrate the diagram if you are using a Google Colab or Jupyter notebook.
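# A minimal sketch of the AWS-managed variant that the comments above hint at,
# swapping the on-prem PostgreSQL/Redis nodes for RDS and ElastiCache from
# diagrams.aws.database. The function name and labels are illustrative.
def nodes_diagram_aws_managed():
    from diagrams import Diagram
    from diagrams.aws.compute import EC2
    from diagrams.aws.database import RDS, Elasticache
    from diagrams.aws.network import ELB, Route53

    with Diagram("Simple Website Diagram (AWS managed)", show=False):
        dns = Route53("dns")
        load_balancer = ELB("Load Balancer")
        database = RDS("User Database")
        cache = Elasticache("Cache")
        svc_group = [EC2("Webserver 1"), EC2("Webserver 2"), EC2("Webserver 3")]

        dns >> load_balancer >> svc_group
        svc_group >> cache
        svc_group >> database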
def simple_main():
    # graph_attr is the set of parameters used to build the diagram.
    graph_attr = {"fontsize": "32", "fontcolor": "#1D3B52", "bgcolor": "white"}

    # Create the base diagram for our map
    with Diagram('fboaventura.dev', direction='LR', filename='simple_fboaventura_dev',
                 outformat='png', graph_attr=graph_attr, show=False) as diag:
        # Add the nodes to the map
        ingress = Haproxy('loadbalancer')
        webserver = Nginx('django')
        db = Postgresql('database')
        memcached = Memcached('sessions')

        # Create a cluster for the Celery components, which work together
        with Cluster('Celery'):
            beat = Celery('beat')
            workers = [Celery('worker1'), Celery('worker2')]
            flower = Celery('flower')

        broker = Rabbitmq('broker')
        logs = Redis('logs')

        # Build the map of relationships between the nodes
        ingress >> webserver
        webserver >> broker
        beat >> broker
        workers >> beat
        webserver >> db
        db >> broker
        webserver >> memcached
        broker >> logs
        workers >> logs
        flower >> beat
        flower >> workers
        beat >> logs
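# A minimal sketch of the output options used above: outformat selects the rendered
# file type (e.g. "png", "jpg", "svg" or "pdf"), filename sets the output path
# without the extension, and show=False skips opening the result automatically.
# The function name and diagram title are illustrative.
def outformat_sketch():
    from diagrams import Diagram
    from diagrams.onprem.network import Nginx

    with Diagram('fboaventura.dev (svg)', filename='simple_fboaventura_dev_svg',
                 outformat='svg', show=False):
        Nginx('django')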
node_attr = {
    "fontsize": "15",
    "width": "1.2",
    "height": "1.2",
    "fontcolor": "#000000"
}

with Diagram(
    name="",
    show=False,
    filename="docs/diagram/architecture",
    graph_attr=graph_attr,
    node_attr=node_attr,
    edge_attr={"color": "#566573"}
):
    client = Pod("Clients")
    api = Pod("APIs")
    broker = RabbitMQ("Broker")
    database = Redis("Backend")
    worker = Pod("Workers")

    client << api << database << worker
    client >> api >> broker >> worker
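# A self-contained sketch of the three Graphviz attribute hooks used in the
# fragment above: graph_attr, node_attr and edge_attr are plain dicts of Graphviz
# attribute names and values passed to Diagram. The attribute values, title and
# node labels below are illustrative.
def graphviz_attrs_sketch():
    from diagrams import Diagram
    from diagrams.k8s.compute import Pod

    graph_attr = {"fontsize": "20", "bgcolor": "transparent"}
    node_attr = {"fontsize": "15", "width": "1.2", "height": "1.2"}
    edge_attr = {"color": "#566573"}

    with Diagram("attrs sketch", show=False, graph_attr=graph_attr,
                 node_attr=node_attr, edge_attr=edge_attr):
        Pod("Clients") >> Pod("APIs")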
from diagrams import Cluster, Diagram
from diagrams.firebase.base import Firebase
from diagrams.azure.database import CosmosDb, SQLDatabases, CacheForRedis
from diagrams.azure.storage import BlobStorage
from diagrams.aws.database import Dynamodb, RDS, Elasticache
from diagrams.aws.storage import S3
from diagrams.onprem.database import Mssql, Oracle, Mongodb, Couchdb
from diagrams.onprem.inmemory import Redis, Memcached, Hazelcast

node_attr = {"fontsize": "20"}
graph_attr = {"fontsize": "28"}

with Diagram("", show=False, direction="TB", node_attr=node_attr):
    with Cluster("On-Premises", graph_attr=graph_attr):
        with Cluster("Relational Database", graph_attr=graph_attr):
            relational = Mssql("")
            relational - [Oracle("")]
        with Cluster("Document Database", graph_attr=graph_attr):
            documentdb = Mongodb("\nMongoDB")
            documentdb - [Couchdb("\nCouchDB")] - Firebase("\nFirebase")
        with Cluster("Cache", graph_attr=graph_attr):
            cache = Redis("\nRedis")
            cache - [Memcached("\nMemcached")] - Hazelcast("\nHazelcast")

    with Cluster("Cloud", graph_attr=graph_attr):
        with Cluster("Azure", graph_attr=graph_attr):
            azure = CosmosDb("\nCosmos DB")
            azure - [SQLDatabases("\nSQL DB")] - CacheForRedis("\nRedis") - BlobStorage("\nBlob Storage")
        with Cluster("AWS", graph_attr=graph_attr):
            aws = Dynamodb("\nDynamoDB")
            aws - [RDS("\nRDS")] - Elasticache("\nElasticache") - S3("\nS3")
from diagrams import Diagram, Cluster
from diagrams.aws.compute import EC2
from diagrams.aws.network import ELB
from diagrams.aws.network import Route53, CloudFront
from diagrams.onprem.database import PostgreSQL  # Would typically use RDS from aws.database
from diagrams.onprem.inmemory import Redis  # Would typically use ElastiCache from aws.database
from diagrams.aws.storage import S3
from diagrams.aws.database import Aurora

with Diagram("Simple Programs API", direction='LR') as diag:  # It's LR by default, but you have a few options with the orientation
    dns = Route53("dns")
    load_balancer = ELB("Load Balancer")
    # database = PostgreSQL("Programs DB")
    cache = Redis("Cache")
    content = S3("Blob storage")
    content_cache = CloudFront("CloudFront")

    with Cluster("DB Cluster"):
        db_main = Aurora("main")
        db_main - [Aurora("backup"), Aurora("backup")]

    with Cluster("Programs API Cluster"):
        svc_group = [EC2("Server 1"), EC2("Server 2"), EC2("Server 3")]

    dns >> load_balancer >> svc_group
    svc_group >> cache >> db_main
    svc_group >> db_main
    svc_group >> content_cache >> content

diag
random_demo = Server("Random demo")
tic_tac_toe = Server("Tic tac toe")
users_fastapi = Server("Users Fastapi")

webservers = [
    fulltext_search,
    book_collection,
    geolocation_search,
    random_demo,
    users,
    users_fastapi,
]

proxy = HAProxy("Krakend")
mqtt_service = Server("MQTT service")
mongo = MongoDB("MongoDb")
mosquitto = IotMqtt("Mosquitto")
grafana = Grafana("Grafana")
influxdb = InfluxDB("InfluxDB")
redis = Redis("Redis")

webservers >> Edge(color="brown") >> mongo
users >> Edge(color="brown") >> redis
book_collection >> Edge(color="black") >> users
mqtt_service >> Edge(color="brown") >> mosquitto
mqtt_service >> Edge(color="brown") >> mongo
mqtt_service >> Edge(color="brown") >> influxdb
grafana >> Edge(color="brown") >> influxdb
proxy >> Edge(color="black") >> random_demo
proxy >> Edge(color="black") >> users
# Create the base diagram for our map
with Diagram('fboaventura.dev', direction='LR', filename='simple_fboaventura_dev',
             outformat='png', graph_attr=graph_attr) as diag:
    # Add the nodes to the map
    ingress = Haproxy('loadbalancer')
    webserver = Nginx('django')
    db = Postgresql('database')
    memcached = Memcached('sessions')

    # Create a cluster for the Celery components, which work together
    with Cluster('Celery'):
        beat = Celery('beat')
        workers = [Celery('worker1'), Celery('worker2')]
        flower = Celery('flower')

    broker = Rabbitmq('broker')
    logs = Redis('logs')

    # Build the map of relationships between the nodes
    ingress >> webserver
    webserver >> broker
    beat >> broker
    workers >> beat
    webserver >> db
    db >> broker
    webserver >> memcached
    broker >> logs
    workers >> logs
    flower >> beat
    flower >> workers
    beat >> logs
redis_haproxy = HAProxy('HAProxy for Redis Cluster')
db_haproxy = HAProxy('HAProxy for MySQL Cluster')

with Cluster('Users') as users:
    with Cluster('Desktop users'):
        dns << LinuxGeneral() >> lb_firewall >> lb
        dns << Windows() >> lb_firewall >> lb
    with Cluster('Mobile users'):
        dns << IOS() >> lb_firewall >> lb
        dns << Android() >> lb_firewall >> lb

with Cluster('Redis Cluster'):
    redis_nodes = []
    redis_main = Redis('Redis: main')
    redis_nodes.append(redis_main)
    for n in range(1, NUM_REDIS_REPLICAS + 1):
        replica = Redis(f'Redis: replica {n}')
        redis_nodes.append(replica)
        redis_main << replica
        redis_main >> replica
        replica >> redis_haproxy
        redis_haproxy >> replica
    redis_haproxy.connect(
        redis_main, Edge(redis_haproxy, reverse=True, color='#5e73e5'))
    redis_main.connect(redis_haproxy,
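# A minimal sketch of the bidirectional-edge pattern used in the fragment above,
# assuming the same diagrams API it relies on: Edge(forward=True, reverse=True)
# draws arrowheads at both ends, and node.connect(other, edge) attaches an explicit
# Edge object between two nodes. Names and the color value are illustrative.
def bidirectional_edge_sketch():
    from diagrams import Diagram, Edge
    from diagrams.onprem.inmemory import Redis
    from diagrams.onprem.network import HAProxy

    with Diagram("Bidirectional edge sketch", show=False):
        proxy = HAProxy("HAProxy for Redis Cluster")
        main = Redis("Redis: main")
        # One edge drawn with arrowheads on both ends:
        proxy.connect(main, Edge(forward=True, reverse=True, color="#5e73e5"))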
Server("fe-server-03") ] backend_lb = Nginx("Backend-LB") with Cluster("Backend Cluster"): backend = [ Server("be-server-01"), Server("be-server-02"), Server("be-server-03") ] Mysql_db = Mysql("MySQL-DB") with Cluster("Redis Cluster"): master = Redis("Master") master - Redis("Replica") with Cluster("Metrics"): metrics = Prometheus("Metric") metrics << Grafana("Monitoring") frontend_lb = Nginx("Frontend-LB") internet = Internet("Internet") webuser = Users("User") webuser >> Edge(color="black", label="TCP/443") >> internet >> Edge(color="black", label="TCP/443") >> \ frontend_lb >> Edge(color="darkgreen", label="TCP/80") >> frontend >> Edge(color="darkgreen", label="TCP/80") >> \ backend_lb >> Edge(color="darkgreen", label="TCP/80") >> backend >> Edge(color="red", label="TCP/3306",style="dashed") >> Mysql_db \ >> backend >> Edge(color="orange", style="dotted") >>metrics backend >> Edge(color="blue", style="dotted") >> master
from diagrams import Cluster, Diagram, Edge
from diagrams.onprem.compute import Server
from diagrams.onprem.inmemory import Redis
from diagrams.onprem.monitoring import Prometheus
from diagrams.onprem.network import Haproxy
from diagrams.aws.compute import ECS

with Diagram(name="Advanced Prometheus Cluster Setup", show=False):
    haproxy = Haproxy("haproxy")

    with Cluster("App Cluster"):
        app = Server("app")
        app_redis = Redis("pushgateway_redis_buffer")
        app - Edge(color="brown", style="dashed") - app_redis
        app_cluster = [app, app_redis]

    with Cluster("Prometheus Cluster"):
        with Cluster("Prom1"):
            push1 = ECS('pushgateway')
            prom1 = Prometheus('prometheus')
        with Cluster("Prom2"):
            push2 = ECS('pushgateway')
            prom2 = Prometheus('prometheus')
        with Cluster("Prom3"):
            push3 = ECS('pushgateway')
            prom3 = Prometheus('prometheus')

    push1 << Edge(label="pull", color="brown") << prom1
postgres = PostgreSQL("AWS-RDS PG")

with Cluster("Node Cold Storage"):
    cold_storage_node = NodeJS("Cold Storage Code")
    s3_cold_storage = S3("S3 (Cold Storage)")

with Cluster("Scanning"):
    with Cluster("Configurable CRON jobs"):
        cron = [NodeJS("Nightly CRON job"), NodeJS("Weekly CRON job")]
    with Cluster("Producer Logic"):
        producer_node = NodeJS("producer code")
    with Cluster("Message Queues"):
        queue = Redis("Headless Queue")
    with Cluster("Node.js Consumers"):
        node_consumer_apps = [NodeJS("Headless Scans"), NodeJS("HTTP scans")]

# API
(
    api_data_gov >> Edge(label="manages") >> router >> Edge(label="calls") >> node_api_app
)
node_api_app >> Edge(label="queries") >> postgres

# Data and Storage
restaurantDB = MySQL("restaurant")
restaurantSvc = Service("restaurant svc")
restaurantApp = Pod("restaurant app")
restaurantApp >> restaurantDB
restaurantSvc >> restaurantApp

reviewDB = MongoDB("review")
reviewSvc = Service("review svc")
with Cluster("review app"):
    reviewApp1 = Pod("v1")
    reviewApp2 = Pod("v2")
    reviewApps = [reviewApp1, reviewApp2]
reviewApps >> reviewDB

ratingDB = Redis("rating")
ratingSvc = Service("rating svc")
ratingApp = Pod("rating app")
ratingApp >> ratingDB

gatewaySvc >> gateway
reviewSvc >> reviewApps
ratingSvc >> ratingApp

customUser >> ing >> orderWebUi >> gatewaySvc
gateway >> orderSvc
gateway >> restaurantSvc
orderApp >> reviewSvc
orderApp >> restaurantSvc
reviewApp2 >> ratingSvc
from diagrams import Cluster, Diagram
from diagrams.onprem.container import Docker
from diagrams.onprem.inmemory import Redis
from diagrams.onprem.network import Internet, Nginx
from diagrams.saas.chat import Slack
from diagrams.onprem.client import User

graph_attr = {"fontsize": "18", "pad": "0"}

with Diagram(
    "SSL Checker Dashboard Diagram",
    filename="ssl-checker-diagram",
    show=False,
    graph_attr=graph_attr,
):
    user = User("User")
    ingress = Nginx("ingress")

    with Cluster("Dashboard Replicas"):
        dashboards = [
            Docker("dashboard"),
            Docker("dashboard"),
            Docker("dashboard"),
        ]

    checker = Docker("checker")
    notifier = Docker("notifier")

    user >> ingress >> dashboards >> Redis("redis") << [checker, notifier]
    checker >> Internet("hosts with SSL")
    notifier >> Slack("Slack")
prometheus = Prometheus("Prometheus")
grafana = Grafana("Grafana")

prometheus >> service1
prometheus >> service2
grafana >> prometheus
support >> grafana

with Cluster("Streams"):
    kafka = Kafka("Kafka")
    ibmmq = Ibmmq("MQ")

with Cluster("Databases"):
    cassandra = Cassandra("NoSQL DB\n(Cassandra)")
    database = PostgreSQL("SQL DB\n(Postgres)")
    redis = Redis("Cache\n(Redis)")

with Cluster("Other services"):
    service3 = EC2("")
    service4 = EC2("")
    service5 = EC2("")
    service6 = EC2("")

with Cluster("SSO Infrastructure"):
    sso = EC2("SSO")

user >> internet
internet >> load_balancer
load_balancer >> service1
import sys

from diagrams import Cluster, Diagram
from diagrams.onprem.aggregator import Fluentd
from diagrams.onprem.analytics import Spark
from diagrams.onprem.compute import Server
from diagrams.onprem.database import PostgreSQL
from diagrams.onprem.inmemory import Redis
from diagrams.onprem.monitoring import Grafana, Prometheus
from diagrams.onprem.network import Nginx
from diagrams.onprem.queue import Kafka

if len(sys.argv) > 1:
    file = str(sys.argv[1])
else:
    file = "diagram"

with Diagram("Advanced Web Service with On-Premise", filename=file, show=False):
    ingress = Nginx("ingress")

    metrics = Prometheus("metric")
    metrics << Grafana("monitoring")

    with Cluster("Service Cluster"):
        grpcsvc = [Server("grpc1"), Server("grpc2"), Server("grpc3")]

    with Cluster("Sessions HA"):
        master = Redis("session")
        master - Redis("replica") << metrics
        grpcsvc >> master

    with Cluster("Database HA"):
        master = PostgreSQL("users")
        master - PostgreSQL("slave") << metrics
        grpcsvc >> master

    aggregator = Fluentd("logging")
    aggregator >> Kafka("stream") >> Spark("analytics")

    ingress >> grpcsvc >> aggregator
from diagrams import Cluster, Diagram, Edge
from diagrams.programming.framework import React
from diagrams.programming.language import Rust
from diagrams.programming.language import TypeScript
from diagrams.onprem.database import MariaDB
from diagrams.onprem.inmemory import Redis

with Diagram("Architecture", show=False):
    storages = [MariaDB("MariaDB"), Redis("Redis")]

    with Cluster("Client"):
        index = React("index.html")

        with Cluster("Pages"):
            pages_index = React("App.tsx")
            pages = pages_index - [React("timeline"), React("post")]
        index >> pages_index

        with Cluster("Components"):
            components_index = TypeScript("index.ts")
            components = components_index - [
                React("TextField"),
                React("Section")
            ]
        for i in range(len(pages)):
            pages[i] >> components_index

        with Cluster("Models"):
            models_index = TypeScript("index.ts")
            models = models_index - [TypeScript("post"), TypeScript("session")]

        with Cluster("API Fetchers"):
web_clients << Edge(color="green") >> hasura >> Edge(color="green") << pg

with Cluster("Aiven"):
    kfk = Kafka("Kafka")
    web_clients << Edge(color="red", label="Produce/Consume") >> kfk
    kfk >> Edge(color="red", label="Postgres Sink Connector") >> pg

with Cluster("Message Search"):
    es = Elasticsearch("Elasticsearch")
    kfk >> Edge(color="blue", label="Elasticsearch Sink Connector") >> es
    es << Edge(color="blue", label="Search") >> web_clients

with Cluster("Caching"):
    with Cluster("Aiven"):
        rds = Redis("Redis")
    web_clients << Edge(color="yellow", label="Response") << rds << Edge(
        color="yellow", label="Response") << hasura

# Step 5
with Diagram(show=True, filename=files[4]):
    with Cluster("Metrics"):
        metrics = InfluxDB("InfluxDB / M3")
        graf = Grafana("Dashboards")

    with Cluster("Web"):
        web_clients = [React("Client 1"), React("Client 2")]

    with Cluster("API and Database"):
        with Cluster("Heroku"):
            hasura = Server("GraphQL")
        with Cluster("Aiven"):
from diagrams.onprem.aggregator import Fluentd
from diagrams.onprem.monitoring import Grafana, Prometheus
from diagrams.onprem.network import Nginx
from diagrams.onprem.queue import Kafka

with Diagram(name="Advanced Web Service with On-Premise (colored)", show=False):
    ingress = Nginx("ingress")

    metrics = Prometheus("metric")
    metrics << Edge(color="firebrick", style="dashed") << Grafana("monitoring")

    with Cluster("Service Cluster"):
        grpcsvc = [
            Server("grpc1"),
            Server("grpc2"),
            Server("grpc3")]

    with Cluster("Sessions HA"):
        master = Redis("session")
        master - Edge(color="brown", style="dashed") - Redis("replica") << Edge(label="collect") << metrics
        grpcsvc >> Edge(color="brown") >> master

    with Cluster("Database HA"):
        master = PostgreSQL("users")
        master - Edge(color="brown", style="dotted") - PostgreSQL("slave") << Edge(label="collect") << metrics
        grpcsvc >> Edge(color="black") >> master

    aggregator = Fluentd("logging")
    aggregator >> Edge(label="parse") >> Kafka("stream") >> Edge(color="black", style="bold") >> Spark("analytics")

    ingress >> Edge(color="darkgreen") << grpcsvc >> Edge(color="darkorange") >> aggregator
from diagrams import Cluster, Diagram
from diagrams.aws.compute import EC2
from diagrams.oci.monitoring import Queue
from diagrams.onprem.database import MySQL
from diagrams.onprem.inmemory import Redis
from diagrams.onprem.network import Nginx, Apache, Internet
from diagrams.onprem.queue import Celery

"""
https://diagrams.mingrammer.com
"""

with Diagram("Sample", show=False):
    user_web = EC2("Web")

    with Cluster('WEB SERVER'):
        web_server = [Apache('Web Server'), Nginx('Web Server')]

    with Cluster('QUEUE'):
        sever_queue = Redis('Queue Broker')
        sever_queue - [Celery('Queue Worker')]

    with Cluster('DATABASE'):
        db_master = MySQL('Database')
        # db_master - Database('Database')

    with Cluster('DATABASE2'):
        db_slave = MySQL('Database2')
        # db_slave - Database('Database2')

    user_web >> web_server >> sever_queue
    sever_queue >> db_master
    sever_queue >> db_slave
    web_server >> db_master
    web_server >> db_slave
from diagrams import Cluster, Diagram
from diagrams.onprem.compute import Server
from diagrams.onprem.inmemory import Redis
from diagrams.onprem.queue import RabbitMQ

# Clusters can only be created inside an active Diagram context, so the fragment
# is wrapped in a minimal one here (the title and defaults are arbitrary).
with Diagram("Graph clustering", show=False):
    with Cluster('Graph clustering'):
        grphclus = Server('grphclus-rest')
        grphclus_optimizer = Server('grphclus-optimizer')
        grphclus_pollster = Server('grphclus-pollster')
        inmemory = Redis('in memory')
        bus = RabbitMQ('edge inputs')

        grphclus_pollster >> bus
        grphclus_optimizer >> [grphclus_pollster, inmemory]
        grphclus >> inmemory
with Diagram(title, filename=filename, graph_attr=graph_attr,
             outformat='png', direction=direction, show=False):
    ingress = Nginx("ingress")

    metrics = Prometheus("metric")
    metrics << Edge(color="firebrick", style="dashed") << Grafana("monitoring")

    with Cluster("Service Cluster"):
        grpcsvc = [Server("grpc1"), Server("grpc2"), Server("grpc3")]

    with Cluster("Sessions HA"):
        primary = Redis("session")
        primary - Edge(color="brown", style="dashed") - Redis("replica") << Edge(
            label="collect") << metrics
        grpcsvc >> Edge(color="brown") >> primary

    with Cluster("Database HA"):
        primary = PostgreSQL("users")
        primary - Edge(color="brown", style="dotted") - PostgreSQL("replica") << Edge(
            label="collect") << metrics
        grpcsvc >> Edge(color="black") >> primary

    aggregator = Fluentd("logging")
    aggregator >> Edge(label="parse") >> Kafka("stream") >> Edge(
        color="black", style="bold") >> Spark("analytics")
from diagrams.onprem.monitoring import Grafana, Prometheus
from diagrams.onprem.network import Nginx
from diagrams.onprem.queue import Kafka

with Diagram(name="Advanced Web Service with On-Premise (colored)", show=False):
    ingress = Nginx("ingress")

    metrics = Prometheus("metric")
    metrics << Edge(color="firebrick", style="dashed") << Grafana("monitoring")

    with Cluster("Service Cluster"):
        grpcsvc = [Server("grpc1"), Server("grpc2"), Server("grpc3")]

    with Cluster("Sessions HA"):
        master = Redis("session")
        master - Edge(color="brown", style="dashed") - Redis("replica") << Edge(
            label="collect") << metrics
        grpcsvc >> Edge(color="brown") >> master

    with Cluster("Database HA"):
        master = PostgreSQL("users")
        master - Edge(color="brown", style="dotted") - PostgreSQL("slave") << Edge(
            label="collect") << metrics
        grpcsvc >> Edge(color="black") >> master

    aggregator = Fluentd("logging")
    aggregator >> Edge(label="parse") >> Kafka("stream") >> Edge(
        color="black", style="bold") >> Spark("analytics")
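# A minimal sketch of the Edge options used throughout the colored examples above:
# label, color and style are forwarded to Graphviz, and an Edge can sit in a chain
# built with >>, << or -. The function name, node labels and values are illustrative.
def edge_options_sketch():
    from diagrams import Diagram, Edge
    from diagrams.onprem.compute import Server

    with Diagram("Edge options sketch", show=False):
        a, b, c = Server("a"), Server("b"), Server("c")
        a >> Edge(label="push", color="darkgreen") >> b
        b << Edge(label="pull", color="firebrick", style="dashed") << c
        a - Edge(style="dotted") - c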