Example #1
def cb(c):
    with Diagram('Context Broker Pod',
                 filename='cb',
                 show=False,
                 graph_attr={'pad': '0.0'}):
        _kafka = Kafka('Kafka')
        _zk = Zookeeper('Zookeeper')
        _logstash = Logstash('Logstash')
        _elasticsearch = Elasticsearch('Elasticsearch')
        _kibana = Kibana('Kibana')
        _cb_man = Python('cb-manager')

        with Cluster('elasticsearch-config'):
            _elasticsearch_cfg = [
                CM('elasticsearch.yml'),
                CM('log4j2.properties')
            ]
        _ = _elasticsearch_cfg - _elasticsearch

        with Cluster('logstash-config'):
            _logstash_cfg = [CM('logstash.yml'), CM('log4j2.properties')]
        _ = _logstash_cfg - _logstash

        with Cluster('logstash-pipeline'):
            _logstash_pipe = [CM('data.conf')]
        _ = _logstash_pipe - _logstash

        _zk - _kafka >> _logstash >> _elasticsearch << _cb_man
        _elasticsearch << _kibana
        _logstash << _cb_man >> _kafka
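
# The cb() tasks in Examples #1-#3 are shown without their imports (the
# `c` argument suggests invoke tasks). A plausible import block for them,
# assuming `CM` is the Kubernetes ConfigMap node; Zookeeper has no obvious
# stock node in the diagrams releases these snippets resemble, so it is
# sketched here as a Custom icon -- an assumption, not the original code:
from diagrams import Cluster, Diagram
from diagrams.custom import Custom
from diagrams.elastic.elasticsearch import Beats, Elasticsearch, Kibana, Logstash
from diagrams.k8s.podconfig import CM  # ConfigMap
from diagrams.onprem.queue import Kafka
from diagrams.programming.language import Python


def Zookeeper(label):
    # hypothetical stand-in; point this at a local Zookeeper icon file
    return Custom(label, './zookeeper.png')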
Example #2
def cb(c):
    with Diagram('cb', show=False):
        _kafka = Kafka('Kafka')
        _zk = Zookeeper('Zookeeper')
        _logstash = Logstash('Logstash')
        _elasticsearch = Elasticsearch('Elasticsearch')
        _cb_man = Python('cb-manager')

        with Cluster('elasticsearch-config'):
            _elasticsearch_cfg = [
                CM('elasticsearch.yml'),
                CM('log4j2.properties')
            ]
        _ = _elasticsearch_cfg - _elasticsearch

        with Cluster('logstash-config'):
            _logstash_cfg = [CM('logstash.yml'), CM('log4j2.properties')]
        _ = _logstash_cfg - _logstash

        with Cluster('logstash-pipeline'):
            _logstash_pipe = [CM('data.conf')]
        _ = _logstash_pipe - _logstash

        _zk - _kafka >> _logstash >> _elasticsearch << _cb_man
        _logstash << _cb_man >> _kafka
Example #3
def cb(c, version):
    with Diagram(f'Context Broker (ver. {version}) Pod', filename=f'cb-{version}', show=False, graph_attr={'pad': '0.0'}):
        _metricbeat = Beats('Metricbeat')
        _heartbeat = Beats('Heartbeat')
        _kafka = Kafka('Kafka')
        _zk = Zookeeper('Zookeeper')
        _logstash = Logstash('Logstash')
        _elasticsearch = Elasticsearch('Elasticsearch')
        _kibana = Kibana('Kibana')

        with Cluster('elasticsearch-config'):
            _elasticsearch_cfg = [CM(f'elasticsearch-{version}.yml'), CM('log4j2.properties')]
        _ = _elasticsearch_cfg - _elasticsearch

        with Cluster('heartbeat-config'):
            _heartbeat_cfg = [CM('heartbeat.yml')]
        _ = _heartbeat_cfg - _heartbeat

        with Cluster('heartbeat-monitor'):
            _heartbeat_monitor = [CM('elasticsearch.yml'), CM('host.yml'), CM('kafka.yml'),
                                  CM('kibana.yml'), CM('logstash.yml'), CM('zookeeper.yml')]
        _ = _heartbeat_monitor - _heartbeat

        with Cluster('kibana-config'):
            _kibana_cfg = [CM('kibana.yml')]
        _ = _kibana_cfg - _kibana

        with Cluster('logstash-config'):
            _logstash_cfg = [CM('logstash.yml'), CM('pipelines.yml'), CM('log4j2.properties')]
        _ = _logstash_cfg - _logstash

        with Cluster('logstash-pipeline'):
            _logstash_pipe = [CM('apache.conf'), CM('mysql.conf'), CM('ssh-server.conf'), CM('system.conf')]
        _ = _logstash_pipe - _logstash

        with Cluster('metricbeat-config'):
            _metricbeat_cfg = [CM('metricbeat.yml')]
        _ = _metricbeat_cfg - _metricbeat

        with Cluster('metricbeat-modules'):
            _metricbeat_mod = [CM('kafka.yml')]
        _ = _metricbeat_mod - _metricbeat

        _zk - _kafka >> _logstash >> _elasticsearch
        _elasticsearch << _kibana
        _logstash << _metricbeat
        _logstash << _heartbeat
Example #4
#
#  (C) Copyright 2021  Pavel Tisnovsky
#
#  All rights reserved. This program and the accompanying materials
#  are made available under the terms of the Eclipse Public License v1.0
#  which accompanies this distribution, and is available at
#  http://www.eclipse.org/legal/epl-v10.html
#
#  Contributors:
#      Pavel Tisnovsky
#

from diagrams import Diagram
from diagrams.onprem.queue import Kafka, Rabbitmq
from diagrams.programming.language import Go

# a new diagram, with its basic properties specified
with Diagram("OnPrem #1", show=True):
    # node definition - consumer
    consumer = Kafka("input stream")

    # node definition - worker
    worker = Go("worker")

    # node definition - producer
    producer = Rabbitmq("output stream")

    # connect the graph nodes with directed edges
    consumer >> worker >> producer
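
# The same three-node pipeline can also carry Edge() objects between the
# nodes; later examples use them for colors and labels. A minimal sketch,
# reusing the imports above:
from diagrams import Edge

with Diagram("OnPrem #1 labeled", show=False):
    Kafka("input stream") >> Edge(label="consume") >> Go("worker") \
        >> Edge(label="publish") >> Rabbitmq("output stream")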
Example #5
#
#  (C) Copyright 2021  Pavel Tisnovsky
#
#  All rights reserved. This program and the accompanying materials
#  are made available under the terms of the Eclipse Public License v1.0
#  which accompanies this distribution, and is available at
#  http://www.eclipse.org/legal/epl-v10.html
#
#  Contributors:
#      Pavel Tisnovsky
#

from diagrams import Diagram
from diagrams.onprem.queue import Kafka, ActiveMQ
from diagrams.programming.language import Go, Rust
from diagrams.aws.database import RDS

# diagram definition specifying its basic properties
with Diagram("OnPrem #6", show=True, direction="TB"):
    # node definition - consumer
    consumer = Kafka("input stream")

    # node definition - database
    db = RDS("storage")

    # fan-out - a larger group of workers
    workersA = [Go("worker #1"), Go("worker #2"), Go("worker #3")]

    # buffer placed between the worker groups
    buffer = ActiveMQ("buffer")

    # fan-out - a larger group of workers
    workersB = [Rust("worker #1"), Rust("worker #2"), Rust("worker #3")]

    # node definition - producer
    producer = Kafka("output stream")
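
    # The excerpt ends before the nodes are wired together. A plausible
    # completion, assuming a fan-out/fan-in shape (an assumption, not the
    # original author's edges):
    consumer >> workersA >> buffer >> workersB >> producer
    producer >> db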
Example #6
            color="green") << pg

    # Step 2
    with Diagram(show=True, filename=files[1]):
        with Cluster("Web"):
            web_clients = [React("Client 1"), React("Client 2")]
        with Cluster("API and Database"):
            with Cluster("Heroku"):
                hasura = Server("GraphQL")
            with Cluster("Aiven"):
                pg = PostgreSQL("DB")
        web_clients << Edge(color="green") >> hasura >> Edge(
            color="green") << pg

        with Cluster("Aiven"):
            kfk = Kafka("Kafka")
        web_clients << Edge(color="red", label="Produce/Consume") >> kfk
        kfk >> Edge(color="red", label="Postgres Sink Connector") >> pg

    # Step 3
    with Diagram(show=True, filename=files[2]):
        with Cluster("Web"):
            web_clients = [React("Client 1"), React("Client 2")]
        with Cluster("API and Database"):
            with Cluster("Heroku"):
                hasura = Server("GraphQL")
            with Cluster("Aiven"):
                pg = PostgreSQL("DB")
        web_clients << Edge(color="green") >> hasura >> Edge(
            color="green") << pg
Example #7
import sys

from diagrams import Cluster, Diagram
from diagrams.onprem.analytics import Spark
from diagrams.onprem.compute import Server
from diagrams.onprem.database import PostgreSQL
from diagrams.onprem.inmemory import Redis
from diagrams.onprem.logging import Fluentd
from diagrams.onprem.monitoring import Grafana, Prometheus
from diagrams.onprem.network import Nginx
from diagrams.onprem.queue import Kafka

if len(sys.argv) > 1:
    file = str(sys.argv[1])
else:
    file = "diagram"

with Diagram("Advanced Web Service with On-Premise", filename=file,
             show=False):
    ingress = Nginx("ingress")

    metrics = Prometheus("metric")
    metrics << Grafana("monitoring")

    with Cluster("Service Cluster"):
        grpcsvc = [Server("grpc1"), Server("grpc2"), Server("grpc3")]

    with Cluster("Sessions HA"):
        master = Redis("session")
        master - Redis("replica") << metrics
        grpcsvc >> master

    with Cluster("Database HA"):
        master = PostgreSQL("users")
        master - PostgreSQL("slave") << metrics
        grpcsvc >> master

    aggregator = Fluentd("logging")
    aggregator >> Kafka("stream") >> Spark("analytics")

    ingress >> grpcsvc >> aggregator
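
# Usage sketch: the script accepts an optional output filename, e.g.
#   python advanced_web_service.py my_diagram   # hypothetical script name
# which becomes Diagram(filename=...); otherwise it defaults to "diagram".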
Example #8
            service2 = EC2("Server node 2\n(Scala)")
            infinispan2 = Infinispan("Distributed node cache\n(Infinispan)")

        service1 >> infinispan1
        service2 >> infinispan2

        prometheus = Prometheus("Prometheus")
        grafana = Grafana("Grafana")

        prometheus >> service1
        prometheus >> service2
        grafana >> prometheus
        support >> grafana

        with Cluster("Streams"):
            kafka = Kafka("Kafka")
            ibmmq = Ibmmq("MQ")

        with Cluster("Databases"):
            cassandra = Cassandra("NoSQL DB\n(Cassandra)")
            database = PostgreSQL("SQL DB\n(Postgres)")
            redis = Redis("Cache\n(Redis)")

        with Cluster("Other services"):
            service3 = EC2("")
            service4 = EC2("")
            service5 = EC2("")
            service6 = EC2("")

        with Cluster("SSO Infrastructure"):
            sso = EC2("SSO")
Example #9
def main():
    graph_attr = {
        "fontsize": "45",
        'overlap_scaling': '100',
        'size': '24!',
        'ratio': 'expand'
    }

    with Diagram(name='Automation Framework Swarm', direction='LR', graph_attr=graph_attr):
        with Cluster('Docker Cluster'):
            docker = Docker('Docker')

            with Cluster('container1'):
                python_container = Python('APIs\nOther Microservices')

        with Cluster('Kafka Cluster'):
            with Cluster('Zookeeper'):
                Zookeeper('Zookeeper\ntcp:2181')

            with Cluster('REST Proxy'):
                rest_proxy = Custom('REST Proxy\ntcp:8082', 'custom_icons/REST-API.png')

            with Cluster('Control Center'):
                control_center = Kafka('Control Center\ntcp:9021')

            with Cluster('Schema Registry'):
                schema_registry = Storage('Schema Registry\ntcp:8081')

            with Cluster('Brokers'):
                broker_1 = Kafka('Broker 1\ntcp:9092')
                kafka_brokers = [
                    broker_1,
                    Kafka('Broker 2\ntcp:9093'),
                    Kafka('Broker 3\ntcp:9094')
                ]

        with Cluster('Secrets Managers'):
            vault = Vault('HashiCorp Vault\ntcp:8200')
            secrets_managers = [
                vault,
            ]

        with Cluster('Logging and Search'):
            with Cluster('Search and Logging'):
                elastic_search = Elasticsearch('Elastic Search\ntcp:9200')
                kibana = Kibana('Kibana\ntcp:5601')
                logstash = Logstash('Logstash\ntcp:5044')
                search_log = [
                    elastic_search,
                    kibana,
                    logstash
                ]

        with Cluster('Inventory and Connectivity'):
            with Cluster('Inventory'):
                nautobot = Custom('Nautobot\ntcp:8000', 'custom_icons/Nautobot.jpeg')

        kafka_brokers - python_container

        python_container - vault

        python_container - nautobot

        nautobot - logstash
        python_container - logstash
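
        # Note on the Custom nodes above: Custom(label, icon_path) renders a
        # local image file, so custom_icons/REST-API.png and
        # custom_icons/Nautobot.jpeg must exist relative to the working
        # directory for Examples #9 and #10 to render.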
Example #10
def main():
    graph_attr = {
        "fontsize": "45",
        'overlap_scaling': '100',
        'size': '24!',
        'ratio': 'expand'
    }

    with Diagram(name='Automation Framework Compose',
                 direction='LR',
                 graph_attr=graph_attr):
        with Cluster('Docker Cluster'):
            docker = Docker('Docker')

            with Cluster('container1'):
                python_container = Python('APIs\nOther Microservices')

            with Cluster('Docker Registry'):
                docker_registry_container = Docker('Docker Registry\ntcp:5000')

            with Cluster('Docker Registry Browser'):
                docker_registry_browser_container = Python(
                    'Docker Registry Browser\ntcp:8088')

            with Cluster('BatFish'):
                batfish_container = Custom(
                    'BatFish\ntcp:8888\ntcp:9997\ntcp:9996',
                    'custom_icons/BatFish.png')

        with Cluster('Kafka Cluster'):
            with Cluster('Zookeeper'):
                Zookeeper('Zookeeper\ntcp:2181')

            with Cluster('REST Proxy'):
                rest_proxy = Custom('REST Proxy\ntcp:8082',
                                    'custom_icons/REST-API.png')

            with Cluster('Control Center'):
                control_center = Kafka('Control Center\ntcp:9021')

            with Cluster('Schema Registry'):
                schema_registry = Storage('Schema Registry\ntcp:8081')

            with Cluster('Brokers'):
                broker_1 = Kafka('Broker 1\ntcp:9092')
                kafka_brokers = [
                    broker_1,
                    Kafka('Broker 2\ntcp:9093'),
                    Kafka('Broker 3\ntcp:9094')
                ]

        with Cluster('Secrets Managers'):
            vault = Vault('HashiCorp Vault\ntcp:8200')
            secrets_managers = [
                vault,
            ]

        with Cluster('Logging and Search'):
            with Cluster('ELK Stack'):
                elastic_search = Elasticsearch('Elastic Search\ntcp:9200')
                kibana = Kibana('Kibana\ntcp:5601')
                logstash = Logstash('Logstash\ntcp:5044')
                search_log = [elastic_search, kibana, logstash]

            with Cluster('Influxdb'):
                influxdb = Influxdb('Influxdb\ntcp:8086')

            with Cluster('Grafana'):
                grafana = Grafana('Grafana\ntcp:3000')

        with Cluster('Inventory and Connectivity'):
            with Cluster('Inventory'):
                nautobot = Custom('Nautobot\ntcp:8000',
                                  'custom_icons/Nautobot.jpeg')

        with Cluster('Database'):
            with Cluster('Mongo dB'):
                mongodb = Mongodb('MongoDb\ntcp:27017')
                mongodb_express = Mongodb('MongoDb Express\ntcp:8181')
                mongo_group = [mongodb, mongodb_express]

        with Cluster('CI/CD'):
            team_city = TC('TeamCity')

        kafka_brokers - python_container

        python_container - vault

        python_container - nautobot

        nautobot - logstash
        python_container - logstash

        nautobot - influxdb
        python_container - influxdb

        python_container - mongodb
Example #11
            postgres = PostgreSQL("postgres")
        vol_mysql = Volume("db-data")
        vol_mysql >> Edge(color="darkgreen", style="dashed") << mysql

        vol_postgres = Volume("db-data")
        vol_postgres >> Edge(color="darkgreen", style="dashed") << postgres

    with Cluster("dblog service"):
        dblog = Server("dblog")

    with Cluster("  ", graph_attr=cluster_attr):
        with Cluster("zookeeper service"):
            zookeeper = Zookeeper("zookeeper")

        with Cluster("kafka service"):
            kafka = Kafka("kafka")

    vol_kafka = Volume("/var/run/docker.sock")
    vol_kafka >> Edge(color="darkgreen", style="dashed") << kafka
    kafka >> zookeeper
    dblog - zookeeper
    dblog >> mysql
    dblog >> postgres
    kafka - zookeeper


    # dblog >> mysql
    # dblog >> postgres
    # kafka >> zookeeper
    # dblog - zookeeper
    # kafka - zookeeper
Example #12
from diagrams import Cluster, Diagram, Edge
from diagrams.onprem.compute import Server
from diagrams.onprem.network import Envoy
from diagrams.onprem.queue import Kafka
from diagrams.onprem.client import User

with Diagram(name="Example", show=False):
    with Cluster("Back End"):
        services = [Server("service1"), Server("service2"), Server("service3")]

    with Cluster("Front End"):
        gateway = Envoy("HTTP/2 SSE\n/events?userId=...")
        user = User("frontend")
        frontend = [gateway, user]
        gateway << Edge(style="dotted") >> user
    services >> Kafka() >> gateway
Example #13
from diagrams import Cluster, Diagram
from diagrams.onprem.analytics import Spark
from diagrams.onprem.client import Client
from diagrams.onprem.queue import Kafka
from diagrams.onprem.analytics import Hive
from diagrams.generic.storage import Storage

# Documentation: https://diagrams.mingrammer.com/docs/getting-started/installation#quick-start
with Diagram("Click Stream Architecture", show=False):
    with Cluster("Docker Compose"):
        producer = Client("Producer")

        kafdrop = Client("Kafdrop UI")

        with Cluster("Kafka"):
            click_stream_topics = [
                Kafka("Click Stream"),
                Kafka("Click Stream Metadata")
            ]

        consumer = Client("Consumer")

        with Cluster("Spark"):
            spark_master = Spark("master")
            spark_worker_1 = Spark("worker-1")
            spark_worker_2 = Spark("worker-2")

        parquet = Storage("Parquet File")

        click_stream_topics >> kafdrop
        producer >> click_stream_topics
        click_stream_topics >> consumer
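
        # The Spark trio and the Parquet sink above are left unwired in this
        # excerpt. A plausible completion (an assumption, not the original
        # author's edges):
        consumer >> spark_master
        spark_master >> [spark_worker_1, spark_worker_2] >> parquet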
Example #14
from diagrams import Diagram, Cluster
from diagrams.aws.compute import EC2
from diagrams.aws.database import RDS
from diagrams.aws.network import ELB
from diagrams.onprem.queue import Kafka
from diagrams.aws.compute import ECS, EKS, Lambda
from diagrams.gcp.database import Firestore
from diagrams.onprem.analytics import Spark
from diagrams.gcp.storage import Storage
from diagrams.programming.language import Python
from diagrams.gcp.devtools import Code
from diagrams.onprem.gitops import Argocd

with Diagram("StreamState", show=False):
    kafka_input = Kafka("Kafka")
    kafka_output = Kafka("Kafka")

    with Cluster("StreamState cluster"):
        # svc_group = [ECS("web1"), ECS("web2"), ECS("web3")]
        with Cluster("Replay"):
            kafka_storage = Storage("Kafka sink")
            spark_reload = Spark("Replay")

        with Cluster("Realtime"):
            spark_persist = Spark("No transforms")
            spark_state = Spark("Analytical Stream")

        argo = Argocd("Gitops")
        argo >> spark_state
        argo >> spark_reload
        with Cluster("Dev"):
Example #15
from diagrams.onprem.queue import Kafka
from diagrams.onprem.compute import Server
from diagrams.onprem.database import PostgreSQL
from diagrams import Cluster, Diagram, Edge

with Diagram("Monitoring pipeline", show=False):
    with Cluster("Monitoring Agents"):
        agents = [
            Server("Agent1"),
            Server("Agent2"),
            Server("Agent3")
        ]
    with Cluster("Targets"):
        targets = [
            Server("Target1") << Edge(color="darkgreen", label="probe") << agents[0],
            Server("Target2") << Edge(color="darkred", label="probe") << agents[1],
            Server("Target3") << Edge(color="darkgreen", label="probe") << agents[2]
        ]
    with Cluster("agent-probe-results.v1.json"):
        topic = Kafka("")
        agents >> Edge(label="push", reverse=False, forward=True) >> topic

    with Cluster("Results processor"):
        processor = Server("Results processor")
        processor << Edge(label="consume") << topic

    database = PostgreSQL("database.probe_results")
    processor >> database
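
    # Note: Edge(forward=True, reverse=False) draws an explicit arrowhead
    # toward the topic only; setting both flags renders a bidirectional edge.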
Example #16
             direction=direction,
             show=False):
    ingress = Nginx("ingress")

    metrics = Prometheus("metric")
    metrics << Edge(color="firebrick", style="dashed") << Grafana("monitoring")

    with Cluster("Service Cluster"):
        grpcsvc = [Server("grpc1"), Server("grpc2"), Server("grpc3")]

    with Cluster("Sessions HA"):
        primary = Redis("session")
        primary - Edge(color="brown",
                       style="dashed") - Redis("replica") << Edge(
                           label="collect") << metrics
        grpcsvc >> Edge(color="brown") >> primary

    with Cluster("Database HA"):
        primary = PostgreSQL("users")
        primary - Edge(color="brown",
                       style="dotted") - PostgreSQL("replica") << Edge(
                           label="collect") << metrics
        grpcsvc >> Edge(color="black") >> primary

    aggregator = Fluentd("logging")
    aggregator >> Edge(label="parse") >> Kafka("stream") >> Edge(
        color="black", style="bold") >> Spark("analytics")

    ingress >> Edge(color="darkgreen") << grpcsvc >> Edge(
        color="darkorange") >> aggregator
Example #17
from diagrams import Cluster, Diagram
from diagrams.onprem.analytics import Flink, Hadoop, Spark
from diagrams.onprem.database import Cassandra
from diagrams.onprem.queue import Activemq, Celery, Kafka, Rabbitmq, Zeromq
from diagrams.azure.analytics import Hdinsightclusters
from diagrams.alibabacloud.analytics import ElaticMapReduce
from diagrams.aws.analytics import EMR

node_attr = {
    "fontsize":"20"
}
graph_attr = {
    "fontsize":"28"
}

with Diagram("", show=False, node_attr=node_attr):
    with Cluster("Brokers", graph_attr=graph_attr):
        kafka = Kafka("\nKafka")
        activemq = Activemq("\nActiveMQ")
        rabbitmq = Rabbitmq("\nRabbitMQ")
        zeromq = Zeromq("\nZeroMQ")
        kafka - activemq
        rabbitmq - zeromq

    with Cluster("Speed Layer", graph_attr=graph_attr):
        kstream = Kafka("\nKafka\nStreams")
        sparks = Spark("\nSpark Streaming")
        flink = Flink("\nFlink")
        #stream_group = [kstream, sparks, flink]
        kstream - [sparks] - flink

    with Cluster("Batch Layer", graph_attr=graph_attr):
        hdfs = Hadoop("\nHDFS")
Example #18
from diagrams import Diagram, Edge
from diagrams.onprem.database import PostgreSQL
from diagrams.onprem.queue import Kafka
from diagrams.onprem.container import Docker


with Diagram("Monitoring Application", show=False):
    monitoring = Docker("Monitoring service")
    stream = Kafka("stream")
    consumer = Docker("Consumer service")
    storage = PostgreSQL("storage")

    monitoring >> Edge(color="black", style="bold") >> stream
    stream >> Edge(color="black", style="bold") >> consumer
    consumer >> Edge(color="black", style="bold") >> storage
Example #19
from diagrams import Cluster, Diagram, Edge
from diagrams.onprem.analytics import Spark
from diagrams.onprem.compute import Server
from diagrams.onprem.database import PostgreSQL
from diagrams.onprem.inmemory import Redis
from diagrams.onprem.logging import Fluentd
from diagrams.onprem.monitoring import Grafana, Prometheus
from diagrams.onprem.network import Nginx
from diagrams.onprem.queue import Kafka

with Diagram(name="Advanced Web Service with On-Premise (colored)", show=False):
    ingress = Nginx("ingress")

    metrics = Prometheus("metric")
    metrics << Edge(color="firebrick", style="dashed") << Grafana("monitoring")

    with Cluster("Service Cluster"):
        grpcsvc = [
            Server("grpc1"),
            Server("grpc2"),
            Server("grpc3")]

    with Cluster("Sessions HA"):
        master = Redis("session")
        master - Edge(color="brown", style="dashed") - Redis("replica") << Edge(label="collect") << metrics
        grpcsvc >> Edge(color="brown") >> master

    with Cluster("Database HA"):
        master = PostgreSQL("users")
        master - Edge(color="brown", style="dotted") - PostgreSQL("slave") << Edge(label="collect") << metrics
        grpcsvc >> Edge(color="black") >> master

    aggregator = Fluentd("logging")
    aggregator >> Edge(label="parse") >> Kafka("stream") >> Edge(color="black", style="bold") >> Spark("analytics")

    ingress >> Edge(color="darkgreen") << grpcsvc >> Edge(color="darkorange") >> aggregator