Example #1
    def load(self, filename):
        logger.info(f"Load triggered - filename {filename}")
        ack = True

        print_cli(f"Loading configuration file at {filename}")

        data, error = self.load_file(filename)

        if error:
            ack = False
            msg = "Configuration not loaded - " + error
            print_cli(None, err=msg, style="error")
        else:
            self.experiment = Experiment("")
            ack = self.experiment.parse(data)

            if ack:
                self.topology = self.experiment.get_topology()
                self.environments.generate_env_cfgs(self.topology)
                msg = "Configuration loaded"
                print_cli(msg, style="normal")
            else:
                msg = "Configuration not loaded - Error parsing scenario data"
                print_cli(None, err=msg, style="error")

        self._status["load"] = ack

        logger.info(f"{msg}")
        return msg
Example #2
 def load(self, scenario_message):
     try:
         scenario = self.parse_bytes(scenario_message)
         self.experiment = Experiment("tmp")
         self.experiment.parse(scenario)
         topology = self.experiment.get_topology()
         topology.build()
         self.topology = topology
         ack = True
     except Exception as e:
         logger.info(f"Could not load scenario - exception {repr(e)}")
         ack = False
     return ack
Example #3
def build():
    iroha_topo = IrohaTopology("remote-1env-3nodes")
    experiment = Experiment("remote-1env-3nodes")
    experiment.set_topology(iroha_topo)

    umbra_default = {
        "id": "umbra-default",
        "remote": False,
        "host": {"address": "localhost"},
        "components": {
            "broker": {
                "uuid": "default-broker",
                "address": "192.168.121.1:8956",
            },
        },
    }

    iroha_topo.set_default_environment(umbra_default)

    env_id = "env_remote"
    env_info = {
        "id": env_id,
        "remote": True,
        "host": {
            "user": "******",
            "address": "192.168.121.101",
            "port": "22",
            "password": "******",
        },
        "components": {
            "scenario": {
                "uuid": "remote-scenario",
                "address": "192.168.121.101:8957",
            },
            "monitor": {
                "uuid": "remote-monitor",
                "address": "192.168.121.101:8958",
            },
        },
    }
    iroha_topo.add_environment(env=env_info)

    iroha_topo.add_iroha_node("node1", "nodes")
    iroha_topo.add_iroha_node("node2", "nodes")
    iroha_topo.add_iroha_node("node3", "nodes")

    iroha_topo.add_network("s1", envid=env_id)

    iroha_topo.add_link_node_network("node3", "s1", "links")
    iroha_topo.add_link_node_network("node2", "s1", "links")
    iroha_topo.add_link_node_network("node1", "s1", "links")

    node_resources = iroha_topo.create_node_profile(cpus=2, memory=2048, disk=None)
    link_resources = iroha_topo.create_link_profile(bw=10, delay="1ms", loss=None)

    iroha_topo.add_node_profile(node_resources, profile="nodes")
    iroha_topo.add_link_profile(link_resources, profile="links")

    experiment.save()
Example #4
def build():
    iroha_topo = IrohaTopology("local-3nodes")

    experiment = Experiment("local-3nodes")
    experiment.set_topology(iroha_topo)

    iroha_topo.add_iroha_node("node1", "nodes")
    iroha_topo.add_iroha_node("node2", "nodes")
    iroha_topo.add_iroha_node("node3", "nodes")

    iroha_topo.add_network("s1", envid="umbra-default")

    iroha_topo.add_link_node_network("node3", "s1", "links")
    iroha_topo.add_link_node_network("node2", "s1", "links")
    iroha_topo.add_link_node_network("node1", "s1", "links")

    node_resources = iroha_topo.create_node_profile(cpus=2, memory=2048, disk=None)
    link_resources = iroha_topo.create_link_profile(bw=10, delay="1ms", loss=None)

    iroha_topo.add_node_profile(node_resources, profile="nodes")
    iroha_topo.add_link_profile(link_resources, profile="links")

    experiment.save()
Example #5
def builds():

    temp_dir = "/tmp/umbra/fabric/chaincode"
    chaincode_dir = os.path.abspath(os.path.join(temp_dir))

    fab_topo = FabricTopology("local-4orgs", chaincode_dir=chaincode_dir)

    experiment = Experiment("local-4orgs")
    experiment.set_topology(fab_topo)

    fab_topo.add_network("s1", envid="umbra-default")
    fab_topo.add_network("s2", envid="umbra-default")

    fab_topo.add_networks_link(src="s1", dst="s2")

    domain = "example.com"
    image_tag = "2.2.1"
    ca_tag = "1.4.7.1"

    fab_topo.add_org("org1", domain, policies=org1_policy)
    fab_topo.add_peer("peer0",
                      "org1",
                      anchor=True,
                      profile="nodes",
                      image_tag=image_tag)
    fab_topo.add_peer("peer1", "org1", profile="nodes", image_tag=image_tag)

    fab_topo.add_org("org2", domain, policies=org2_policy)
    fab_topo.add_peer("peer0",
                      "org2",
                      anchor=True,
                      profile="nodes",
                      image_tag=image_tag)
    fab_topo.add_peer("peer1", "org2", profile="nodes", image_tag=image_tag)

    fab_topo.add_org("org3", domain, policies=org3_policy)
    fab_topo.add_peer("peer0",
                      "org3",
                      anchor=True,
                      profile="nodes",
                      image_tag=image_tag)

    fab_topo.add_org("org4", domain, policies=org4_policy)
    fab_topo.add_peer("peer0",
                      "org4",
                      anchor=True,
                      profile="nodes",
                      image_tag=image_tag)

    ord_specs = [
        {
            "Hostname": "orderer",
            "SANS": ["localhost"],
        },
    ]

    fab_topo.add_orderer(
        "orderer",
        domain,
        profile="nodes",
        mode="raft",
        specs=ord_specs,
        policies=orderer_policy,
        image_tag=image_tag,
    )

    fab_topo.add_ca("ca",
                    "org1",
                    domain,
                    "admin",
                    "admin_pw",
                    profile="nodes",
                    image_tag=ca_tag)
    fab_topo.add_ca("ca",
                    "org2",
                    domain,
                    "admin",
                    "admin_pw",
                    profile="nodes",
                    image_tag=ca_tag)
    fab_topo.add_ca("ca",
                    "org3",
                    domain,
                    "admin",
                    "admin_pw",
                    profile="nodes",
                    image_tag=ca_tag)
    fab_topo.add_ca("ca",
                    "org4",
                    domain,
                    "admin",
                    "admin_pw",
                    profile="nodes",
                    image_tag=ca_tag)

    fab_topo.configtx(configtx)
    p1 = "TwoOrgsOrdererGenesis.Consortiums.SampleConsortium.Organizations"
    p2 = "TwoOrgsOrdererGenesis.Orderer.Organizations"
    p3 = "TwoOrgsChannel.Application.Organizations"
    fab_topo.set_configtx_profile(p1, ["org1", "org2", "org3", "org4"])
    fab_topo.set_configtx_profile(p2, ["orderer"])
    fab_topo.set_configtx_profile(p3, ["org1", "org2", "org3", "org4"])

    # If needed, you can multiplex the nodes of an org so they connect to separate networks.
    # Use the function add_node_network_link with the params (org, node_name, network, profile_name).
    fab_topo.add_node_network_link("org1", "peer0", "s1", "links")
    fab_topo.add_node_network_link("org1", "peer1", "s2", "links")
    fab_topo.add_node_network_link("org1", "ca", "s2", "links")
    # fab_topo.add_org_network_link("org1", "s1", "links")
    fab_topo.add_node_network_link("org2", "peer0", "s1", "links")
    fab_topo.add_node_network_link("org2", "peer1", "s2", "links")
    fab_topo.add_node_network_link("org2", "ca", "s2", "links")
    # fab_topo.add_org_network_link("org2", "s2", "links")
    fab_topo.add_org_network_link("org3", "s1", "links")
    fab_topo.add_org_network_link("org4", "s2", "links")
    fab_topo.add_org_network_link("orderer", "s1", "links")

    node_resources = fab_topo.create_node_profile(cpus=1,
                                                  memory=1024,
                                                  disk=None)
    link_resources = fab_topo.create_link_profile(bw=1, delay="2ms", loss=None)

    fab_topo.add_node_profile(node_resources, profile="nodes")
    fab_topo.add_link_profile(link_resources, profile="links")

    # Save config file
    experiment.save()
Example #6
def builds():

    # Umbra keeps everything in /tmp/umbra, and in the case of fabric
    # the configs generated will be in /tmp/umbra/fabric/"name of the experiment".

    temp_dir = "/tmp/umbra/fabric/chaincode"
    chaincode_dir = os.path.abspath(os.path.join(temp_dir))

    # Defines the Fabric topology - the main class holding orgs/peers/CAs/orderers.
    # From the FabricTopology, it is possible to define the whole set of orgs,
    # peers, CAs and orderers that compose the network experiment.
    # The chaincode directory can be specified if the events of the experiment
    # make use of any of the chaincodes in that dir, so umbra knows the right
    # place to look for them.
    fab_topo = FabricTopology("local-2orgs", chaincode_dir=chaincode_dir)

    # Defines the experiment containing the topology; events can be added later.
    # An experiment consists of a topology and events. It sets the ground for
    # what is actually going to be instantiated and executed.
    experiment = Experiment("local-2orgs")
    experiment.set_topology(fab_topo)

    # Environments in umbra are the places (i.e., baremetal servers and/or virtual machines)
    # where the components of umbra are executed, and consequently the topology itself.
    # An environment can be remote or local (parameter remote set to true or false).
    # The user must be allowed to execute operations as sudo in the environment.
    # In a local environment, the user is the logged-in user who instantiated and ran umbra.
    # By default, umbra defines the umbra-default environment for its topologies,
    # meaning all the components will be executed on the host machine, using the localhost address.

    # Umbra knows that a network is associated with an environment, and that
    # all the nodes interconnected to this network are in the same environment
    # as the network. All the settings regarding the reachability of
    # nodes, networks, and environments are handled by umbra.
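
    # As a hedged sketch (mirroring the remote-environment examples later in
    # this file; names and addresses are purely illustrative), a remote
    # environment entry would look like:
    # env_remote = {
    #     "id": "env1",
    #     "remote": True,
    #     "host": {"user": "<user>", "address": "<ip>", "port": "22", "password": "<pw>"},
    #     "components": {
    #         "scenario": {"uuid": "env1-scenario", "address": "<ip>:8957"},
    #         "monitor": {"uuid": "env1-monitor", "address": "<ip>:8958"},
    #     },
    # }
    # fab_topo.add_environment(env=env_remote)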

    # The network must be associated with the environment where it is going
    # to be placed/executed. All the nodes connected to the network will be deployed
    # in the environment where the network is placed.
    fab_topo.add_network("s1", envid="umbra-default")

    # Definitions of the fabric topology settings, e.g., the domain and the tags of the
    # container images that are going to be used to instantiate the fabric nodes.
    domain = "example.com"
    image_tag = "2.2.1"
    ca_tag = "1.4.7.1"

    # The topology can be composed of orgs and their peers.
    fab_topo.add_org("org1", domain, policies=org1_policy)
    fab_topo.add_peer(
        "peer0", "org1", anchor=True, profile="nodes", image_tag=image_tag
    )
    fab_topo.add_peer("peer1", "org1", profile="nodes", image_tag=image_tag)

    fab_topo.add_org("org2", domain, policies=org2_policy)
    fab_topo.add_peer(
        "peer0", "org2", anchor=True, profile="nodes", image_tag=image_tag
    )
    fab_topo.add_peer("peer1", "org2", profile="nodes", image_tag=image_tag)

    ord_specs = [
        {
            "Hostname": "orderer",
            "SANS": ["localhost"],
        },
    ]

    # The topology can be composed of orderers with their proper parameters.
    fab_topo.add_orderer(
        "orderer",
        domain,
        profile="nodes",
        mode="raft",
        specs=ord_specs,
        policies=orderer_policy,
        image_tag=image_tag,
    )

    # The topology can be composed of CAs with their proper parameters.
    fab_topo.add_ca(
        "ca", "org1", domain, "admin", "admin_pw", profile="nodes", image_tag=ca_tag
    )
    fab_topo.add_ca(
        "ca", "org2", domain, "admin", "admin_pw", profile="nodes", image_tag=ca_tag
    )

    # Umbra builds the configtx.yml file based on the policies specified
    # and the configtx base specification.
    # The configtx specification contains the definition of the profiles to be used
    # by the network to generate the fabric topology artifacts (e.g., genesis.block).
    # In order to fill in the correct parameters of each profile in configtx, the path
    # of each profile must be specified, separated by dots, and each of these profiles
    # must be defined with the names of the orgs or orderers whose settings fill it in.
    # Umbra then knows it must fill the proper path of a configtx profile with
    # the correct information of an org/orderer in order to compose the complete configtx.yml
    # file to create the orgs/orderers/CAs artifacts.
    # In a future release, umbra is planned to detect those configs and make the proper
    # definition of settings in configtx.
    fab_topo.configtx(configtx)
    p1 = "TwoOrgsOrdererGenesis.Consortiums.SampleConsortium.Organizations"
    p2 = "TwoOrgsOrdererGenesis.Orderer.Organizations"
    p3 = "TwoOrgsChannel.Application.Organizations"
    fab_topo.set_configtx_profile(p1, ["org1", "org2"])
    fab_topo.set_configtx_profile(p2, ["orderer"])
    fab_topo.set_configtx_profile(p3, ["org1", "org2"])

    # The interconnection of umbra orgs/orderers to the network must be defined.
    # When an org is connected to a network, all its peers/CAs are connected to the network too.
    fab_topo.add_org_network_link("org1", "s1", "links")
    fab_topo.add_org_network_link("org2", "s1", "links")
    fab_topo.add_org_network_link("orderer", "s1", "links")
    # If needed, you can multiplex the nodes of an org so they connect to separate networks.
    # Use the function add_node_network_link with the params (org, node_name, network, profile_name),
    # e.g., fab_topo.add_node_network_link("org1", "peer0", "s1", "links")
    # Remember to add all the nodes of an org when using this function.

    # Defines the resource profiles for nodes and links.
    # Node resources are defined as the maximum number of logical CPUs
    # a node can make use of and the amount of memory (MB) the node can utilize;
    # disk is not yet implemented.
    node_resources = fab_topo.create_node_profile(cpus=1, memory=1024, disk=None)
    # Link resources are defined as bandwidth (Mbps), delay,
    # and loss (packet loss percentage).
    link_resources = fab_topo.create_link_profile(bw=1, delay="2ms", loss=None)

    # Each node and link in the network can be associated with a resource profile.
    # The profile names tie nodes and links to the node and link resources
    # created above.
    # I.e., each node of the network was defined with profile="nodes", and the line
    # below specifies that the profile named "nodes" must have the configuration of
    # resources associated with node_resources, i.e., cpus=1, memory=1024, disk=None.
    # Similarly for links: their profiles are defined by the word "links",
    # and in the line below the "links" profile receives the resources defined
    # by link_resources, i.e., bw=1, delay="2ms", loss=None.
    fab_topo.add_node_profile(node_resources, profile="nodes")
    fab_topo.add_link_profile(link_resources, profile="links")

    # Save the experiment.
    # When saving, an experiment (topology and events) is compiled into a format
    # that umbra can later load and utilize.
    # Saving an experiment means all the topology artifacts are going to be built.
    # In the case of fabric, this means umbra runs cryptogen and configtxgen on the
    # cryptoconfig.yml and configtx.yml files generated by umbra, utilizing the
    # specified fabric topology components.
    experiment.save()
Example #7
def builds():

    temp_dir = "/tmp/umbra/fabric/chaincode"
    chaincode_dir = os.path.abspath(os.path.join(temp_dir))

    fab_topo = FabricTopology("remote-2envs-4orgs",
                              chaincode_dir=chaincode_dir)

    experiment = Experiment("remote-2envs-4orgs")
    experiment.set_topology(fab_topo)

    umbra_default = {
        "id": "umbra-default",
        "remote": False,
        "host": {},
        "components": {
            # "scenario": {
            #     "uuid": "default-scenario",
            #     "address": "192.168.122.1:8957",
            # },
            "broker": {
                "uuid": "default-broker",
                "address": "192.168.121.1:8956",
            },
        },
    }

    fab_topo.set_default_environment(umbra_default)

    env0_id = "env1"
    env0_info = {
        "id": env0_id,
        "remote": True,
        "host": {
            "user": "******",
            "address": "192.168.121.101",
            "port": "22",
            "password": "******",
        },
        "components": {
            "scenario": {
                "uuid": "y-scenario",
                "address": "192.168.121.101:8957",
            },
            "monitor": {
                "uuid": "y-monitor",
                "address": "192.168.121.101:8958",
            },
        },
    }

    env1_id = "env2"
    env1_info = {
        "id": env1_id,
        "remote": True,
        "host": {
            "user": "******",
            "address": "192.168.121.102",
            "port": "22",
            "password": "******",
        },
        "components": {
            "scenario": {
                "uuid": "z-scenario",
                "address": "192.168.121.102:8957",
            },
            "monitor": {
                "uuid": "z-monitor",
                "address": "192.168.121.102:8958",
            },
        },
    }

    fab_topo.add_environment(env=env0_info)
    fab_topo.add_environment(env=env1_info)

    fab_topo.add_network("s1", envid=env0_id)
    fab_topo.add_network("s2", envid=env1_id)

    fab_topo.add_networks_link(src="s1", dst="s2")

    domain = "example.com"
    image_tag = "2.2.1"
    ca_tag = "1.4.7.1"

    fab_topo.add_org("org1", domain, policies=org1_policy)
    fab_topo.add_peer("peer0",
                      "org1",
                      anchor=True,
                      profile="nodes",
                      image_tag=image_tag)
    fab_topo.add_peer("peer1", "org1", profile="nodes", image_tag=image_tag)

    fab_topo.add_org("org2", domain, policies=org2_policy)
    fab_topo.add_peer("peer0",
                      "org2",
                      anchor=True,
                      profile="nodes",
                      image_tag=image_tag)
    fab_topo.add_peer("peer1", "org2", profile="nodes", image_tag=image_tag)

    fab_topo.add_org("org3", domain, policies=org3_policy)
    fab_topo.add_peer("peer0",
                      "org3",
                      anchor=True,
                      profile="nodes",
                      image_tag=image_tag)

    fab_topo.add_org("org4", domain, policies=org4_policy)
    fab_topo.add_peer("peer0",
                      "org4",
                      anchor=True,
                      profile="nodes",
                      image_tag=image_tag)

    ord_specs = [
        {
            "Hostname": "orderer",
            "SANS": ["localhost"],
        },
    ]

    fab_topo.add_orderer(
        "orderer",
        domain,
        profile="nodes",
        mode="raft",
        specs=ord_specs,
        policies=orderer_policy,
        image_tag=image_tag,
    )

    fab_topo.add_ca("ca",
                    "org1",
                    domain,
                    "admin",
                    "admin_pw",
                    profile="nodes",
                    image_tag=ca_tag)
    fab_topo.add_ca("ca",
                    "org2",
                    domain,
                    "admin",
                    "admin_pw",
                    profile="nodes",
                    image_tag=ca_tag)
    fab_topo.add_ca("ca",
                    "org3",
                    domain,
                    "admin",
                    "admin_pw",
                    profile="nodes",
                    image_tag=ca_tag)
    fab_topo.add_ca("ca",
                    "org4",
                    domain,
                    "admin",
                    "admin_pw",
                    profile="nodes",
                    image_tag=ca_tag)

    fab_topo.configtx(configtx)
    p1 = "TwoOrgsOrdererGenesis.Consortiums.SampleConsortium.Organizations"
    p2 = "TwoOrgsOrdererGenesis.Orderer.Organizations"
    p3 = "TwoOrgsChannel.Application.Organizations"
    fab_topo.set_configtx_profile(p1, ["org1", "org2", "org3", "org4"])
    fab_topo.set_configtx_profile(p2, ["orderer"])
    fab_topo.set_configtx_profile(p3, ["org1", "org2", "org3", "org4"])

    fab_topo.add_org_network_link("org1", "s1", "links")
    fab_topo.add_org_network_link("org2", "s2", "links")
    fab_topo.add_org_network_link("org3", "s1", "links")
    fab_topo.add_org_network_link("org4", "s2", "links")
    fab_topo.add_org_network_link("orderer", "s1", "links")

    node_resources = fab_topo.create_node_profile(cpus=1,
                                                  memory=1024,
                                                  disk=None)
    link_resources = fab_topo.create_link_profile(bw=1, delay="2ms", loss=None)

    fab_topo.add_node_profile(node_resources, profile="nodes")
    fab_topo.add_link_profile(link_resources, profile="links")

    # Save config file
    experiment.save()
Example #8
def build():
    iroha_topo = IrohaTopology("local-3nodes-events")

    experiment = Experiment("local-3nodes-events")
    experiment.set_topology(iroha_topo)

    iroha_topo.add_iroha_node("node1", "nodes")
    iroha_topo.add_iroha_node("node2", "nodes")
    iroha_topo.add_iroha_node("node3", "nodes")

    iroha_topo.add_network("s1", envid="umbra-default")

    iroha_topo.add_link_node_network("node3", "s1", "links")
    iroha_topo.add_link_node_network("node2", "s1", "links")
    iroha_topo.add_link_node_network("node1", "s1", "links")

    node_resources = iroha_topo.create_node_profile(cpus=2,
                                                    memory=2048,
                                                    disk=None)
    link_resources = iroha_topo.create_link_profile(bw=10,
                                                    delay=None,
                                                    loss=None)

    iroha_topo.add_node_profile(node_resources, profile="nodes")
    iroha_topo.add_link_profile(link_resources, profile="links")

    ev_01 = {
        "action": "create_domain",
        "node": "node2",
        "user": "******",
        "domain": "umbra2",
        "default_role": "user",
    }

    ev_02 = {
        "action": "create_account",
        "node": "node2",
        "user": "******",
        "domain": "umbra",
        "account_pubkey":
        "c4dec3e3478dca78af4bde6fd3d533f99426cdb73c655b45e597216207969d47",
        "account_name": "irohatester",
    }

    ev_03 = {
        "node": "node2",
        "user": "******",
        "action": "set_account_detail",
        "account_id": "admin@umbra",
        "account_detail_name": "age",
        "account_detail_value": "18",
    }

    ev_04 = {
        "node": "node2",
        "user": "******",
        "action": "create_asset",
        "domain": "umbra",
        "asset_name": "vidas",
        "asset_precision": 2,
    }

    ev_05 = {
        "node": "node2",
        "user": "******",
        "action": "add_asset_quantity",
        "asset_id": "vidas#umbra",
        "asset_amount": "500.00",
    }

    ev_06 = {
        "node": "node2",
        "user": "******",
        "action": "transfer_asset",
        "asset_id": "vidas#umbra",
        "asset_amount": "50.00",
        "src_account_id": "admin@umbra",
        "dest_account_id": "test@umbra",
        "description": "plus vidas",
    }

    ev_07 = {
        "node": "node2",
        "user": "******",
        "action": "get_asset_info",
        "asset_id": "vidas#umbra",
    }

    ev_08 = {
        "node": "node2",
        "user": "******",
        "action": "get_account_assets",
        "account_id": "admin@umbra",
    }

    ev_09 = {
        "node": "node2",
        "user": "******",
        "action": "get_account_detail",
        "account_id": "admin@umbra",
    }

    ev_10 = {
        "node": "node2",
        "user": "******",
        "action": "grant_permission",
        "account_id": "admin@umbra",
        "account": "test@umbra",
        "permission": "can_set_my_account_detail",
    }

    sched_ev_01 = {"from": 11}
    sched_ev_02 = {"from": 13}
    sched_ev_03 = {"from": 15}
    sched_ev_04 = {"from": 17}
    sched_ev_05 = {"from": 19}
    sched_ev_06 = {"from": 21}
    sched_ev_07 = {"from": 23}
    sched_ev_08 = {"from": 25}
    sched_ev_09 = {"from": 27}
    sched_ev_10 = {"from": 30}

    experiment.add_event(sched=sched_ev_01, category="iroha", event=ev_01)
    experiment.add_event(sched=sched_ev_02, category="iroha", event=ev_02)
    experiment.add_event(sched=sched_ev_03, category="iroha", event=ev_03)
    experiment.add_event(sched=sched_ev_04, category="iroha", event=ev_04)
    experiment.add_event(sched=sched_ev_05, category="iroha", event=ev_05)
    experiment.add_event(sched=sched_ev_06, category="iroha", event=ev_06)
    experiment.add_event(sched=sched_ev_07, category="iroha", event=ev_07)
    experiment.add_event(sched=sched_ev_08, category="iroha", event=ev_08)
    experiment.add_event(sched=sched_ev_09, category="iroha", event=ev_09)
    experiment.add_event(sched=sched_ev_10, category="iroha", event=ev_10)

    experiment.save()
Example #9
def builds():

    # Umbra keeps everything in /tmp/umbra, and in the case of fabric
    # the configs generated will be in /tmp/umbra/fabric/"name of the experiment".

    temp_dir = "/tmp/umbra/fabric/chaincode"
    chaincode_dir = os.path.abspath(os.path.join(temp_dir))

    # Defines the Fabric topology - the main class holding orgs/peers/CAs/orderers.
    # From the FabricTopology, it is possible to define the whole set of orgs,
    # peers, CAs and orderers that compose the network experiment.
    # The chaincode directory can be specified if the events of the experiment
    # make use of any of the chaincodes in that dir, so umbra knows the right
    # place to look for them.
    fab_topo = FabricTopology("local-2orgs-events",
                              chaincode_dir=chaincode_dir)

    # Defines the experiment containing the topology; events can be added later.
    # An experiment consists of a topology and events. It sets the ground for
    # what is actually going to be instantiated and executed.
    experiment = Experiment("local-2orgs-events")
    experiment.set_topology(fab_topo)

    # Environments in umbra are the places (i.e., baremetal servers and/or virtual machines)
    # where the components of umbra are executed, and consequently the topology itself.
    # An environment can be remote or local (parameter remote set to true or false).
    # The user must be allowed to execute operations as sudo in the environment.
    # In a local environment, the user is the logged-in user who instantiated and ran umbra.
    # By default, umbra defines the umbra-default environment for its topologies,
    # meaning all the components will be executed on the host machine, using the localhost address.

    # Umbra knows that a network is associated with an environment, and that
    # all the nodes interconnected to this network are in the same environment
    # as the network. All the settings regarding the reachability of
    # nodes, networks, and environments are handled by umbra.

    # The network must be associated with the environment where it is going
    # to be placed/executed. All the nodes connected to the network will be deployed
    # in the environment where the network is placed.
    fab_topo.add_network("s1", envid="umbra-default")

    # Definitions of the fabric topology settings, e.g., the domain and the tags of the
    # container images that are going to be used to instantiate the fabric nodes.
    domain = "example.com"
    image_tag = "2.2.1"
    ca_tag = "1.4.7.1"

    # The topology can be composed of orgs and their peers.
    fab_topo.add_org("org1", domain, policies=org1_policy)
    fab_topo.add_peer("peer0",
                      "org1",
                      anchor=True,
                      profile="nodes",
                      image_tag=image_tag)
    fab_topo.add_peer("peer1", "org1", profile="nodes", image_tag=image_tag)

    fab_topo.add_org("org2", domain, policies=org2_policy)
    fab_topo.add_peer("peer0",
                      "org2",
                      anchor=True,
                      profile="nodes",
                      image_tag=image_tag)
    fab_topo.add_peer("peer1", "org2", profile="nodes", image_tag=image_tag)

    ord_specs = [
        {
            "Hostname": "orderer",
            "SANS": ["localhost"],
        },
    ]

    # The topology can be composed of orderers with their proper parameters.
    fab_topo.add_orderer(
        "orderer",
        domain,
        profile="nodes",
        mode="raft",
        specs=ord_specs,
        policies=orderer_policy,
        image_tag=image_tag,
    )

    # The topology can be composed of CAs with their proper parameters.
    fab_topo.add_ca("ca",
                    "org1",
                    domain,
                    "admin",
                    "admin_pw",
                    profile="nodes",
                    image_tag=ca_tag)
    fab_topo.add_ca("ca",
                    "org2",
                    domain,
                    "admin",
                    "admin_pw",
                    profile="nodes",
                    image_tag=ca_tag)

    # Umbra builds the configtx.yml file based on the policies specified
    # and the configtx base specification.
    # The configtx specification contains the definition of the profiles to be used
    # by the network to generate the fabric topology artifacts (e.g., genesis.block).
    # In order to fill in the correct parameters of each profile in configtx, the path
    # of each profile must be specified, separated by dots, and each of these profiles
    # must be defined with the names of the orgs or orderers whose settings fill it in.
    # Umbra then knows it must fill the proper path of a configtx profile with
    # the correct information of an org/orderer in order to compose the complete configtx.yml
    # file to create the orgs/orderers/CAs artifacts.
    # In a future release, umbra is planned to detect those configs and make the proper
    # definition of settings in configtx.
    fab_topo.configtx(configtx)
    p1 = "TwoOrgsOrdererGenesis.Consortiums.SampleConsortium.Organizations"
    p2 = "TwoOrgsOrdererGenesis.Orderer.Organizations"
    p3 = "TwoOrgsChannel.Application.Organizations"
    fab_topo.set_configtx_profile(p1, ["org1", "org2"])
    fab_topo.set_configtx_profile(p2, ["orderer"])
    fab_topo.set_configtx_profile(p3, ["org1", "org2"])

    # The interconnection of umbra orgs/orderers to the network must be defined.
    # When an org is connected to a network, all its peers/CAs are connected to the network too.
    fab_topo.add_org_network_link("org1", "s1", "links")
    fab_topo.add_org_network_link("org2", "s1", "links")
    fab_topo.add_org_network_link("orderer", "s1", "links")

    # Defines the resource profiles for nodes and links.
    # Node resources are defined as the maximum number of logical CPUs
    # a node can make use of and the amount of memory (MB) the node can utilize;
    # disk is not yet implemented.
    node_resources = fab_topo.create_node_profile(cpus=1,
                                                  memory=1024,
                                                  disk=None)
    # Link resources are defined as bandwidth (Mbps), delay,
    # and loss (packet loss percentage).
    link_resources = fab_topo.create_link_profile(bw=1, delay="2ms", loss=None)

    # Each node and link in the network can be associated with a resource profile.
    # The profile names tie nodes and links to the node and link resources
    # created above.
    # I.e., each node of the network was defined with profile="nodes", and the line
    # below specifies that the profile named "nodes" must have the configuration of
    # resources associated with node_resources, i.e., cpus=1, memory=1024, disk=None.
    # Similarly for links: their profiles are defined by the word "links",
    # and in the line below the "links" profile receives the resources defined
    # by link_resources, i.e., bw=1, delay="2ms", loss=None.
    fab_topo.add_node_profile(node_resources, profile="nodes")
    fab_topo.add_link_profile(link_resources, profile="links")

    # Events are used to interact with the topology.
    # Events are of the scenario category when related to modifications
    # that might happen at run-time in the deployed topology, be it its nodes and/or links.
    # A scenario event must contain the specific group (nodes or links)
    # that its target belongs to.
    # A target is the reference name of the node (full name) or link (src and dst in any order).
    # The event specs contain the details about the event.
    # For now, in the specs of a scenario event, the action can only be update (add/remove will be added later).
    # In specs: online means whether the node/link will be up or down; resources define the resources the
    # node or link will have.
    # In the links group, a link might have resources specified as bw (bandwidth Mbps), delay (string with number and unit),
    # and loss (packet loss ratio as a percentage, 0-100).
    # In the nodes group, a node might have resources specified as docker allows; for a complete list
    # see https://docs.docker.com/engine/reference/commandline/update/
    # or the API docs: https://docker-py.readthedocs.io/en/stable/api.html#module-docker.api.container
    # Examples of node resources are: blkio_weight, cpu_period, cpu_quota, cpu_shares, cpuset_cpus,
    # cpuset_mems, mem_limit, mem_reservation, memswap_limit, kernel_memory, restart_policy.
    # When the update action is taken on a node, it is not actually stopped or started, it is
    # paused and unpaused (i.e., all its processes are paused or resumed).
    ev_scenario_01 = {
        "group": "links",
        "specs": {
            "action": "update",
            "online": True,
            "resources": {
                "bw": 3,
                "delay": "4ms",
                "loss": 1,
            },
        },
        "target": ("s1", "peer0.org1.example.com"),
    }

    ev_scenario_02 = {
        "group": "nodes",
        "specs": {
            "action": "update",
            "online": False,
            "resources": {},
        },
        "target": "peer0.org1.example.com",
    }

    ev_scenario_03 = {
        "group": "nodes",
        "specs": {
            "action": "update",
            "online": True,
            "resources": {},
        },
        "target": "peer0.org1.example.com",
    }

    # Events are scheduled relative to the moment umbra-broker receives confirmation
    # that the topology was successfully instantiated by umbra-scenario; that moment is time 0.
    # From 0 on, all events can be scheduled with the from keyword.
    # The scheduling can take place with other keywords (all integers) too:
    # e.g., sched = {"from": 0, "until": 0, "duration": 0, "interval": 0, "repeat": 0}
    # from is the time when the event must be triggered (0-...)
    # until is the time the event stops
    # duration is the total amount of time the event may run
    # interval is the time the event must wait before its trigger is repeated
    # repeat is the number of times the event must be triggered
    # For instance:
    # e.g., sched = {"from": 0, "until": 10, "duration": 2, "interval": 1, "repeat": 3}
    # The sched above will start the event at moment 0, repeat the event 3 times, waiting
    # 1 second between repetitions, with each run lasting no more than 2 seconds, until
    # all the previous time summed reaches 10 seconds. If the event finishes before 2 seconds,
    # that's fine.
    # Summed, that is 2 (duration) x 3 (repeat) + 1 (interval) x 3 (repeat) = 9 seconds,
    # which finishes before the until limit of 10 seconds is reached.
    # The repetitions stop when the until timeout is reached.
    sched_ev_01 = {"from": 2}
    sched_ev_02 = {"from": 10}
    sched_ev_03 = {"from": 20}
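
    # A hedged sketch combining the scheduling keywords described above
    # (assuming add_event accepts them together, as the comment block suggests):
    # experiment.add_event(
    #     sched={"from": 0, "until": 10, "duration": 2, "interval": 1, "repeat": 3},
    #     category="scenario",
    #     event=ev_scenario_01,
    # )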

    # Events are added by category.
    # scenario refers to infrastructure/umbra-scenario events (nodes/links up/down
    # and resource updates).
    # Other categories include blockchain model events, i.e., fabric, iroha, indy, etc.
    # The proper definition of the event must exist for each one of the blockchain projects.
    # The events are defined according to the broker plugins.
    # In the broker, a plugin is an extension that supports the events consumed through the
    # python SDK of a particular blockchain project.
    # The local-4orgs-events.py example contains examples of fabric events.
    experiment.add_event(sched=sched_ev_01,
                         category="scenario",
                         event=ev_scenario_01)
    experiment.add_event(sched=sched_ev_02,
                         category="scenario",
                         event=ev_scenario_02)
    experiment.add_event(sched=sched_ev_03,
                         category="scenario",
                         event=ev_scenario_03)

    # Save the experiment.
    # When saving, an experiment (topology and events) is compiled into a format
    # that umbra can later load and utilize.
    # Saving an experiment means all the topology artifacts are going to be built.
    # In the case of fabric, this means umbra runs cryptogen and configtxgen on the
    # cryptoconfig.yml and configtx.yml files generated by umbra, utilizing the
    # specified fabric topology components.
    experiment.save()
Example #10
class CLIRunner:
    def __init__(self):
        self.experiment = None
        self.topology = None
        self.experiment_config = {}
        self.environments = Environments()
        self.broker_interface = BrokerInterface()
        self.cmds = {
            "load": self.load,
            "start": self.start,
            "stop": self.stop,
            "install": self.install,
            "uninstall": self.uninstall,
            "begin": self.begin,
            "end": self.end,
        }

        self._status = {
            "load": False,
            "start": False,
            "stop": False,
            "install": False,
            "uninstall": False,
            "begin": False,
            "end": False,
        }
        logger.info("CLIRunner init")

    def get_cmds(self):
        return list(self.cmds.keys())

    def filepath(self, name):
        filepath = os.path.normpath(os.path.join(os.path.dirname(__file__), name))
        return filepath

    def load_file(self, filename):
        filepath = self.filepath(filename)
        data = {}
        error = ""
        try:
            with open(filepath, "r") as fp:
                data = json.load(fp)
        except Exception as e:
            error = f"Load file error: {repr(e)}"
            logger.debug(error)
        else:
            logger.debug("Load file ok")
        return data, error
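
    # Usage sketch (filename purely illustrative): data, error =
    # self.load_file("experiment.json") returns the parsed JSON dict and ""
    # on success, or {} and the exception repr on failure.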

    def load(self, filename):
        logger.info(f"Load triggered - filename {filename}")
        ack = True

        print_cli(f"Loading configuration file at {filename}")

        data, error = self.load_file(filename)

        if error:
            ack = False
            msg = "Configuration not loaded - " + error
            print_cli(None, err=msg, style="error")
        else:
            self.experiment = Experiment("")
            ack = self.experiment.parse(data)

            if ack:
                self.topology = self.experiment.get_topology()
                self.environments.generate_env_cfgs(self.topology)
                msg = "Configuration loaded"
                print_cli(msg, style="normal")
            else:
                msg = "Configuration not loaded - Error parsing scenario data"
                print_cli(None, err=msg, style="error")

        self._status["load"] = ack

        logger.info(f"{msg}")
        return msg

    async def start(self):
        logger.info(f"Start triggered")

        print_cli(f"Starting", style="attention")

        ack, messages = self.environments.implement_env_cfgs("start")
        self._status["start"] = ack

        logger.info(f"{messages}")
        return ack, messages

    async def stop(self):
        logger.info(f"Stop triggered")

        print_cli(f"Stopping", style="attention")

        ack, messages = self.environments.implement_env_cfgs("stop")
        self._status["start"] = not ack
        self._status["stop"] = ack

        logger.info(f"{messages}")
        return messages

    async def install(self):
        logger.info(f"install triggered")

        print_cli(f"Installing", style="attention")

        ack, messages = self.environments.implement_env_cfgs("install")
        self._status["install"] = ack

        logger.info(f"{messages}")
        return ack, messages

    async def uninstall(self):
        logger.info(f"uninstall triggered")

        print_cli(f"Uninstalling", style="attention")

        ack, messages = self.environments.implement_env_cfgs("uninstall")
        self._status["install"] = not ack
        self._status["uninstall"] = ack

        logger.info(f"{messages}")
        return messages

    async def begin(self):
        logger.info(f"begin triggered")

        print_cli(f"Beginning", style="attention")

        default_env = self.topology.get_default_environment()
        default_env_components = default_env.get("components")
        broker_env = default_env_components.get("broker")

        print_cli(f"Experiment Begin", style="info")
        scenario = self.experiment.dump()
        reply, error = await self.broker_interface.begin(broker_env, scenario)

        ack = False if error else True
        self._status["begin"] = ack

        if ack:
            print_cli(f"Umbra Experiment Ok", style="normal")
            messages = reply
        else:
            print_cli(f"Umbra Experiment Error", style="error")
            messages = error

        logger.info(f"{messages}")
        return ack, messages

    async def end(self):
        logger.info(f"end triggered")

        print_cli(f"Ending", style="attention")

        default_env = self.topology.get_default_environment()
        default_env_components = default_env.get("components")
        broker_env = default_env_components.get("broker")

        print_cli(f"Experiment End", style="info")
        scenario = self.experiment.dump()
        reply, error = await self.broker_interface.end(broker_env, scenario)

        ack = False if error else True
        self._status["end"] = ack
        self._status["begin"] = not ack

        if ack:
            print_cli(f"Ended Umbra Experiment", style="normal")
            messages = reply
        else:
            print_cli(f"Ended Umbra Experiment Error", style="error")
            messages = error

        logger.info(f"{messages}")
        return ack, messages

    def status(self, command):
        ack = False
        error = ""

        if command == "load":
            ack = not self._status["start"]
            if not ack:
                error = "Cannot load - config started - stop it first"

        if command == "start":
            ack = self._status["load"] and not self._status["start"]
            if not ack:
                error = "Cannot start - config not loaded or config started"

        if command == "stop":
            ack = self._status["start"] and not self._status["stop"]
            if not ack:
                error = "Cannot stop - config not started or config stopped"

        if command == "install":
            pass

        if command == "uninstall":
            pass

        if command == "begin":
            pass

        if command == "end":
            pass

        return True, error

    async def execute(self, cmds):
        cmd = cmds[0]
        logger.info(f"Executing commands: {cmds}")

        ok, error = self.status(cmd)

        if ok:
            available_cmds = list(self.cmds.keys())

            if cmd == "load":
                if len(cmds) == 2:
                    config_filename = cmds[1]
                    output = self.load(config_filename)
                    return output
                else:
                    return "Missing config filepath"

            if cmd in available_cmds:
                func = self.cmds.get(cmd)
                output = await func()
                return output

            else:
                output = f"Command not found in {available_cmds}"
                return output

        else:
            return error
Example #11
class Operator:
    def __init__(self, info):
        self.info = info
        self.experiment = None
        self.topology = None
        self.plugins = {}
        self.events_handler = Handler()
        self.events_fabric = FabricEvents()
        self.events_iroha = IrohaEvents()
        self.events_scenario = ScenarioEvents()
        self.events_results = {}

    def parse_bytes(self, msg):
        msg_dict = {}

        if type(msg) is bytes:
            msg_str = msg.decode("utf-8")
            if msg_str:
                msg_dict = json.loads(msg_str)

        return msg_dict

    def serialize_bytes(self, msg):
        msg_bytes = b""

        if type(msg) is dict:
            msg_str = json.dumps(msg)
            msg_bytes = msg_str.encode("utf-8")

        return msg_bytes
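
    # Round-trip sketch: serialize_bytes({"a": 1}) yields b'{"a": 1}', and
    # parse_bytes(b'{"a": 1}') yields {"a": 1} again; non-dict/non-bytes
    # inputs fall through to the empty defaults above.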

    async def call_monitor(self, address, data):
        logger.info(f"Calling Monitor - {address}")

        directrix = json_format.ParseDict(data, Directrix())
        host, port = address.split(":")

        try:
            channel = Channel(host, port)
            stub = MonitorStub(channel)
            status = await stub.Measure(directrix)

        except Exception as e:
            ack = False
            info = repr(e)
            logger.info(f"Error - monitor failed - {info}")
        else:
            if status.error:
                ack = False
                logger.info(f"Monitor error: {status.error}")
                info = status.error
            else:
                ack = True
                if status.info:
                    info = self.parse_bytes(status.info)
                else:
                    info = {}
                logger.info(f"Monitor info: {info}")
        finally:
            channel.close()

        return ack, info

    def get_monitor_env_address(self, env):
        envs = self.topology.get_environments()
        env_data = envs.get(env)
        env_components = env_data.get("components")
        env_monitor_component = env_components.get("monitor")
        env_monitor_address = env_monitor_component.get("address")
        return env_monitor_address

    def build_monitor_directrix(self, env, info, action):

        if action == "start":
            hosts = info.get("topology").get("hosts")
            targets = repr(set(hosts.keys()))
        else:
            targets = repr(set())

        data = {
            "action": action,
            "flush": {
                "live": True,
                "environment": env,
                "address": self.info.get("address"),
            },
            "sources": [
                {
                    "id": 1,
                    "name": "container",
                    "parameters": {
                        "targets": targets,
                        "duration": "3600",
                        "interval": "5",
                    },
                    "schedule": {},
                },
                {
                    "id": 2,
                    "name": "host",
                    "parameters": {
                        "duration": "3600",
                        "interval": "5",
                    },
                    "schedule": {},
                },
            ],
        }

        return data

    async def call_monitors(self, stats, action):
        logger.info(f"Call monitors")

        all_acks = {}
        for env, info in stats.items():
            data = self.build_monitor_directrix(env, info, action)
            address = self.get_monitor_env_address(env)
            ack, info = await self.call_monitor(address, data)
            all_acks[env] = ack

        all_monitors_ack = all(all_acks.values())
        logger.info(
            f"Call monitors - action {action} - status: {all_monitors_ack}")
        return all_monitors_ack

    async def call_scenario(self, uid, action, topology, address):
        logger.info(f"Calling Experiment - {action}")

        scenario = self.serialize_bytes(topology)
        deploy = Workflow(id=uid, action=action, scenario=scenario)
        deploy.timestamp.FromDatetime(datetime.now())

        host, port = address.split(":")

        try:
            channel = Channel(host, port)
            stub = ScenarioStub(channel)
            status = await stub.Establish(deploy)

        except Exception as e:
            ack = False
            info = repr(e)
            logger.info(
                f"Error - deploy topology in environment failed - exception {info}"
            )
        else:
            if status.error:
                ack = False
                logger.info(f"Experiment not deployed error: {status.error}")
                info = status.error
            else:
                ack = True
                if status.info:
                    info = self.parse_bytes(status.info)
                else:
                    info = {}
                logger.info(f"Experiment info: {info}")
        finally:
            channel.close()

        return ack, info

    async def call_scenarios(self, uid, topology, action):
        envs = topology.get_environments()
        topo_envs = topology.build_environments()

        logger.info(f"Calling scenarios - {action}")
        logger.info(f"Environment scenarios - {envs}")
        logger.debug(f"Environment topologies - {topo_envs}")

        acks = {}
        envs_topo_info = {}

        for env in topo_envs:
            if env in envs:
                env_data = envs.get(env)

                env_components = env_data.get("components")
                scenario_component = env_components.get("scenario")
                env_address = scenario_component.get("address")

                env_topo = topo_envs.get(env)

                ack, topo_info = await self.call_scenario(
                    uid, action, env_topo, env_address)

                acks[env] = ack
                envs_topo_info[env] = topo_info

        if all(acks.values()):
            logger.info(f"All environment scenarios deployed - {acks}")
        else:
            logger.info(f"Environment scenarios error - {acks}")

        return acks, envs_topo_info

    def load(self, scenario_message):
        try:
            scenario = self.parse_bytes(scenario_message)
            self.experiment = Experiment("tmp")
            self.experiment.parse(scenario)
            topology = self.experiment.get_topology()
            topology.build()
            self.topology = topology
            ack = True
        except Exception as e:
            logger.info(f"Could not load scenario - exception {repr(e)}")
            ack = False
        return ack

    async def start(self, uid):
        topology = self.experiment.get_topology()
        acks, stats = await self.call_scenarios(uid, topology, "start")

        info, error = {}, {}
        if all(acks.values()):
            all_monitors_ack = await self.call_monitors(stats, "start")
            info = stats
        else:
            error = stats

        return info, error

    async def stop(self, uid):
        topology = self.experiment.get_topology()

        acks, stats = await self.call_scenarios(uid, topology, "stop")

        info, error = {}, {}
        if all(acks.values()):
            all_monitors_ack = await self.call_monitors(stats, "stop")
            info = stats
        else:
            error = stats

        return info, error

    def build_report(self, uid, info, error):
        info_msg = self.serialize_bytes(info)
        error_msg = self.serialize_bytes(error)
        report = Report(id=uid, info=info_msg, error=error_msg)
        return report

    async def execute(self, config):
        uid = config.id
        action = config.action
        scenario = config.scenario

        if self.load(scenario):

            info, error = {}, {}

            if action == "start":
                info, error = await self.start(uid)

                if not error:
                    await self.call_events(info)

            elif action == "stop":
                info, error = await self.stop(uid)

            else:
                error = {
                    "Execution error": f"Unknown action ({action}) to execute config"
                }

            report = self.build_report(uid, info, error)

        else:
            error_msg = "scenario could not be parsed/loaded"
            report = Report(id=config.id, error=error_msg)

        return report

    def config_plugins(self):
        logger.info("Configuring Umbra plugins")

        model = self.topology.get_model()
        umbra = self.topology.get_umbra()
        umbra_model_cfgs = umbra.get(model)

        if model == "fabric":
            logger.info("Configuring Fabric plugin")
            settings = umbra_model_cfgs.get("settings")
            configtx = umbra_model_cfgs.get("configtx")
            configsdk = umbra_model_cfgs.get("configsdk")
            chaincode = umbra_model_cfgs.get("chaincode")

            ack_fabric = self.events_fabric.config(settings, configsdk,
                                                   chaincode, configtx)
            if ack_fabric:
                self.plugins["fabric"] = self.events_fabric

        if model == "iroha":
            self.events_iroha.config(umbra_model_cfgs)
            self.plugins["iroha"] = self.events_iroha

        self.events_scenario.config(self.topology)
        self.plugins["scenario"] = self.events_scenario

    async def handle_events(self, events):
        events_calls = {}

        for evs in events.values():
            evs_formatted = {ev_id: ev for ev_id, ev in evs.items()}
            events_calls.update(evs_formatted)

        self.events_results = await self.events_handler.run(events_calls)

    def schedule_plugins(self):
        sched_events = {}

        for name, plugin in self.plugins.items():
            logger.info("Scheduling plugin %s events", name)
            events = self.experiment.events.get_by_category(name)
            logger.info(f"Scheduling {len(events)} events: {events}")
            plugin_sched_evs = plugin.schedule(events)
            sched_events[plugin] = plugin_sched_evs

        return sched_events

    async def call_events(self, info_deploy):
        logger.info("Scheduling events")

        # info_topology = info_deploy.get("topology")
        # info_hosts = info_deploy.get("hosts")
        # topo = self.experiment.get_topology()
        # topo.fill_config(info_topology)
        # topo.fill_hosts_config(info_hosts)
        # self.topology = topo
        self.config_plugins()

        sched_events = self.schedule_plugins()
        # await self.handle_events(sched_events)
        coro_events = self.handle_events(sched_events)
        asyncio.create_task(coro_events)
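        # Note: create_task schedules handle_events to run in the background
        # on the running event loop, so call_events returns without awaiting
        # the events' completion.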
Example #12
def builds():

    temp_dir = "/tmp/umbra/source/examples/fabric/chaincode"
    chaincode_dir = os.path.abspath(os.path.join(temp_dir))

    fab_topo = FabricTopology("local-4orgs-events",
                              chaincode_dir=chaincode_dir)

    experiment = Experiment("local-4orgs-events")
    experiment.set_topology(fab_topo)

    fab_topo.add_network("s1", envid="umbra-default")
    fab_topo.add_network("s2", envid="umbra-default")

    fab_topo.add_networks_link(src="s1", dst="s2")

    domain = "example.com"
    image_tag = "2.2.1"
    ca_tag = "1.4.7.1"

    fab_topo.add_org("org1", domain, policies=org1_policy)
    fab_topo.add_peer("peer0",
                      "org1",
                      anchor=True,
                      profile="nodes",
                      image_tag=image_tag)
    fab_topo.add_peer("peer1", "org1", profile="nodes", image_tag=image_tag)

    fab_topo.add_org("org2", domain, policies=org2_policy)
    fab_topo.add_peer("peer0",
                      "org2",
                      anchor=True,
                      profile="nodes",
                      image_tag=image_tag)
    fab_topo.add_peer("peer1", "org2", profile="nodes", image_tag=image_tag)

    fab_topo.add_org("org3", domain, policies=org3_policy)
    fab_topo.add_peer("peer0",
                      "org3",
                      anchor=True,
                      profile="nodes",
                      image_tag=image_tag)

    fab_topo.add_org("org4", domain, policies=org4_policy)
    fab_topo.add_peer("peer0",
                      "org4",
                      anchor=True,
                      profile="nodes",
                      image_tag=image_tag)

    ord_specs = [
        {
            "Hostname": "orderer",
            "SANS": ["localhost"],
        },
    ]

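    # mode="raft" selects the Raft-based consensus for the ordering service.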
    fab_topo.add_orderer(
        "orderer",
        domain,
        profile="nodes",
        mode="raft",
        specs=ord_specs,
        policies=orderer_policy,
        image_tag=image_tag,
    )

    fab_topo.add_ca("ca",
                    "org1",
                    domain,
                    "admin",
                    "admin_pw",
                    profile="nodes",
                    image_tag=ca_tag)
    fab_topo.add_ca("ca",
                    "org2",
                    domain,
                    "admin",
                    "admin_pw",
                    profile="nodes",
                    image_tag=ca_tag)
    fab_topo.add_ca("ca",
                    "org3",
                    domain,
                    "admin",
                    "admin_pw",
                    profile="nodes",
                    image_tag=ca_tag)
    fab_topo.add_ca("ca",
                    "org4",
                    domain,
                    "admin",
                    "admin_pw",
                    profile="nodes",
                    image_tag=ca_tag)

    fab_topo.configtx(configtx)
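    # Dotted paths into the configtx.yaml profile tree; set_configtx_profile
    # places the listed orgs under these profile sections.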
    p1 = "TwoOrgsOrdererGenesis.Consortiums.SampleConsortium.Organizations"
    p2 = "TwoOrgsOrdererGenesis.Orderer.Organizations"
    p3 = "TwoOrgsChannel.Application.Organizations"
    fab_topo.set_configtx_profile(p1, ["org1", "org2", "org3", "org4"])
    fab_topo.set_configtx_profile(p2, ["orderer"])
    fab_topo.set_configtx_profile(p3, ["org1", "org2", "org3", "org4"])

    fab_topo.add_org_network_link("org1", "s1", "links")
    fab_topo.add_org_network_link("org2", "s2", "links")
    fab_topo.add_org_network_link("org3", "s1", "links")
    fab_topo.add_org_network_link("org4", "s2", "links")
    fab_topo.add_org_network_link("orderer", "s1", "links")

    node_resources = fab_topo.create_node_profile(cpus=1,
                                                  memory=1024,
                                                  disk=None)
    link_resources = fab_topo.create_link_profile(bw=1, delay="2ms", loss=None)

    fab_topo.add_node_profile(node_resources, profile="nodes")
    fab_topo.add_link_profile(link_resources, profile="links")

    ev_create_channel = {
        "action": "create_channel",
        "org": "org1",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "profile": "TwoOrgsChannel",
    }

    ev_join_channel_org1 = {
        "action": "join_channel",
        "org": "org1",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "peers": ["peer0", "peer1"],
    }

    ev_join_channel_org2 = {
        "action": "join_channel",
        "org": "org2",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "peers": ["peer0", "peer1"],
    }

    ev_join_channel_org3 = {
        "action": "join_channel",
        "org": "org3",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "peers": ["peer0"],
    }

    ev_join_channel_org4 = {
        "action": "join_channel",
        "org": "org4",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "peers": ["peer0"],
    }

    ev_info_channel = {
        "action": "info_channel",
        "org": "org1",
        "user": "******",
        "channel": "testchannel",
        "peers": ["peer0"],
    }

    ev_info_channel_config = {
        "action": "info_channel_config",
        "org": "org1",
        "user": "******",
        "channel": "testchannel",
        "peers": ["peer0"],
    }

    ev_info_channels = {
        "action": "info_channels",
        "org": "org1",
        "user": "******",
        "peers": ["peer0"],
    }

    ev_info_network = {
        "action": "info_network",
        "orderer": "orderer",
    }

    ev_chaincode_install_org1 = {
        "action": "chaincode_install",
        "org": "org1",
        "user": "******",
        "chaincode_name": "example_cc",
        "chaincode_path": "github.com/example_cc",
        "chaincode_version": "v1.0",
        "peers": ["peer0", "peer1"],
    }

    ev_chaincode_install_org2 = {
        "action": "chaincode_install",
        "org": "org2",
        "user": "******",
        "chaincode_name": "example_cc",
        "chaincode_path": "github.com/example_cc",
        "chaincode_version": "v1.0",
        "peers": ["peer0", "peer1"],
    }

    ev_chaincode_instantiate_org1 = {
        "action": "chaincode_instantiate",
        "org": "org1",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ["a", "200", "b", "50"],
        "chaincode_version": "v1.0",
    }

    ev_chaincode_instantiate_org2 = {
        "action": "chaincode_instantiate",
        "org": "org2",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ["a", "200", "b", "50"],
        "chaincode_version": "v1.0",
    }

    ev_chaincode_invoke_org1 = {
        "action": "chaincode_invoke",
        "org": "org1",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ["a", "b", "100"],
    }

    ev_chaincode_query_org1 = {
        "action": "chaincode_query",
        "org": "org2",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ["b"],
    }

    ev_chaincode_query_org2 = {
        "action": "chaincode_query",
        "org": "org2",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ["b"],
    }

    # IMPORTANT: at the moment, Fabric 2.0+ is not supported by fabric-python-sdk
    # TODO: update these events when fabric-python-sdk announces support for Fabric 2.0+

    experiment.add_event(sched={"from": 1},
                         category="fabric",
                         event=ev_info_channels)
    # experiment.add_event("1", "fabric", ev_create_channel)
    # experiment.add_event("3", "fabric", ev_join_channel_org1)
    # experiment.add_event("3", "fabric", ev_join_channel_org2)
    # experiment.add_event("3", "fabric", ev_join_channel_org3)
    # experiment.add_event("3", "fabric", ev_join_channel_org4)
    # experiment.add_event("4", "fabric", ev_info_channel)
    # experiment.add_event("5", "fabric", ev_info_channel_config)
    # experiment.add_event("6", "fabric", ev_info_channels)
    # experiment.add_event("7", "fabric", ev_info_network)
    # experiment.add_event("8", "fabric", ev_chaincode_install_org1)
    # experiment.add_event("8", "fabric", ev_chaincode_install_org2)
    # experiment.add_event("10", "fabric", ev_chaincode_instantiate_org1)
    # experiment.add_event("10", "fabric", ev_chaincode_instantiate_org2)
    # experiment.add_event("20", "fabric", ev_chaincode_invoke_org1)
    # experiment.add_event("30", "fabric", ev_chaincode_query_org1)
    # experiment.add_event("32", "fabric", ev_chaincode_query_org2)

    # Save config file
    experiment.save()
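
Because each event above is a plain dict, a mistyped or missing key only surfaces when the event is executed. A small, hypothetical pre-check can catch that earlier; the required-key sets below are inferred from the events in this example, not taken from Umbra's API:

# Hypothetical sanity check for the event dicts above; the required key
# sets are inferred from this example, not from Umbra itself.
REQUIRED_KEYS = {
    "create_channel": {"org", "user", "orderer", "channel", "profile"},
    "join_channel": {"org", "user", "orderer", "channel", "peers"},
    "chaincode_install": {"org", "user", "chaincode_name",
                          "chaincode_path", "chaincode_version", "peers"},
    "chaincode_invoke": {"org", "user", "peers", "channel",
                         "chaincode_name", "chaincode_args"},
}

def check_event(event):
    action = event.get("action")
    missing = REQUIRED_KEYS.get(action, set()) - event.keys()
    if missing:
        raise ValueError(f"event {action!r} is missing keys: {sorted(missing)}")
    return event

# Usage:
# experiment.add_event(sched={"from": 1}, category="fabric",
#                      event=check_event(ev_info_channels))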