def test_scenario_start_stop(self):
    """Exercise a full start/stop cycle of a scenario topology.

    Parses ./Fabric-Simple-01.json into a DesignScenario, builds its
    topology environments, then drives a separate Scenario instance
    through the "start" and "stop" workflows for one environment.
    """
    # Scenario used only to play start/stop; the DesignScenario below
    # performs the parsing of the config file.
    sc = Scenario("")
    scenario_config = load_file("./Fabric-Simple-01.json")
    scenario = DesignScenario("")
    ack = scenario.parse(scenario_config)
    assert ack is True
    topology = scenario.get_topology()
    topology.build()
    envs = topology.get_environments()
    topo_envs = topology.build_environments()
    logger.info(f"topology envs {envs}")
    # NOTE(review): "env156" looks fixture-specific — confirm it matches
    # the environments generated from Fabric-Simple-01.json.
    topology_dict = topo_envs.get("env156")
    logger.info(json.dumps(topology_dict, sort_keys=True, indent=4))
    logger.info("Starting topology")
    reply = asyncio.run(sc.play("1", "start", topology_dict))
    ok, msg = reply
    logger.info(f"topology started {ok}")
    print(json.dumps(msg, sort_keys=True, indent=4))
    logger.info("Stopping topology")
    reply = asyncio.run(sc.play("1", "stop", topology_dict))
    ok, msg = reply
    logger.info(f"topology stoped {ok}")
    logger.info(json.dumps(msg, sort_keys=True, indent=4))
async def call_events(self, scenario, info_deploy):
    """Fill the parsed scenario topology with deployment info and schedule events.

    Parameters:
        scenario: scenario dict (as parsed from the request) containing
            the "events" to schedule.
        info_deploy: deployment info dict with "topology" and "hosts"
            sections returned by the scenario service.
    """
    logger.info("Scheduling events")
    # Fresh Scenario re-parsed from the raw dict; stored on the instance.
    self.scenario = Scenario(None, None, None)
    self.scenario.parse(scenario)
    info_topology = info_deploy.get("topology")
    info_hosts = info_deploy.get("hosts")
    topo = self.scenario.get_topology()
    # Merge runtime deployment details into the declared topology.
    topo.fill_config(info_topology)
    topo.fill_hosts_config(info_hosts)
    self.topology = topo
    self.config_plugins()
    events = scenario.get("events")
    self.schedule_plugins(events)
def test_scenario_parse(self):
    """Round-trip a scenario config through Config message bytes and parse it.

    Loads the fixture JSON, encodes it as utf32 bytes into a Config
    protobuf message, decodes it back, and checks the Scenario/topology
    can be parsed and its environments built.
    """
    filename = "./fixtures/Fabric-Simple-01.json"
    filepath = self.filepath(filename)
    config_dict = self.load(filepath)
    # Same wire format the services use: JSON string encoded as utf32.
    config_str = json.dumps(config_dict)
    config_bytes = config_str.encode("utf32")
    config_msg = Config(id=filename, scenario=config_bytes)
    config_msg.timestamp.FromDatetime(datetime.now())
    request_scenario = config_msg.scenario
    scenario_dict = self.parse_bytes(request_scenario)
    scenario = Scenario("tmp")
    scenario.parse(scenario_dict)
    topology = scenario.get_topology()
    topology.show()
    topo_envs = topology.build_environments()
    print(topo_envs)
    envs = topology.get_environments()
    print(envs)
class Operator:
    """Orchestrates scenario deployment and plugin event scheduling.

    Serializes scenario/topology dicts into protobuf messages, calls the
    umbra-scenario gRPC service to start/stop deployments, and configures
    and schedules plugin events (currently Fabric) against the deployed
    topology.
    """

    def __init__(self, info):
        self.info = info
        self.scenario = None      # parsed Scenario instance (set in call_events)
        self.topology = None      # topology filled with deployment info
        self.events_fabric = FabricEvents()
        self.plugins = {}         # plugin name -> event handler

    def parse_bytes(self, msg):
        """Decode a utf32-encoded JSON bytes payload into a dict.

        Returns {} when msg is not a bytes object.
        """
        msg_dict = {}
        if isinstance(msg, bytes):
            msg_str = msg.decode('utf32')
            msg_dict = json.loads(msg_str)
        return msg_dict

    def serialize_bytes(self, msg):
        """Encode a dict as utf32 JSON bytes.

        Returns b'' when msg is not a dict.
        """
        msg_bytes = b''
        if isinstance(msg, dict):
            msg_str = json.dumps(msg)
            msg_bytes = msg_str.encode('utf32')
        return msg_bytes

    async def call_scenario(self, test, command, topology, address):
        """Run `command` ("start"/"stop") for `topology` on the scenario service.

        Parameters:
            test: request/deployment id.
            command: workflow command to execute.
            topology: topology dict to serialize into the Deploy message.
            address: "host:port" of the umbra-scenario service.

        Returns:
            (ack, info) — ack is True on success; info is the parsed
            deployment info dict ({} on failure).
        """
        logger.info(f"Deploying Scenario - {command}")
        scenario = self.serialize_bytes(topology)
        deploy = Deploy(id=test, workflow=command, scenario=scenario)
        deploy.timestamp.FromDatetime(datetime.now())
        host, port = address.split(":")
        channel = Channel(host, port)
        stub = ScenarioStub(channel)
        built = await stub.Run(deploy)
        # Fix: `info` must exist on the error path too; previously it was
        # bound only in the success branch, so a deployment error raised
        # NameError at `return ack, info` instead of reporting the failure.
        info = {}
        if built.error:
            ack = False
            logger.info(f'Scenario not deployed error: {built.error}')
        else:
            ack = True
            logger.info(f'Scenario deployed: {built.ok}')
            info = self.parse_bytes(built.info)
        channel.close()
        return ack, info

    def config_plugins(self):
        """Enable the plugins declared by the topology's umbra configs."""
        logger.info("Configuring Umbra plugins")
        umbra_cfgs = self.topology.umbra
        plugin = umbra_cfgs.get("plugin")
        if plugin == "fabric":
            logger.info("Configuring Fabric plugin")
            topology = umbra_cfgs.get("topology")
            configtx = umbra_cfgs.get("configtx")
            configsdk = umbra_cfgs.get("configsdk")
            chaincode = umbra_cfgs.get("chaincode")
            ack_fabric = self.events_fabric.config(
                topology, configsdk, chaincode, configtx)
            if ack_fabric:
                self.plugins["fabric"] = self.events_fabric

    def schedule_plugins(self, events):
        """Hand the scenario events to every configured plugin."""
        for name, plugin in self.plugins.items():
            logger.info("Scheduling plugin %s events", name)
            plugin.schedule(events)

    async def call_events(self, scenario, info_deploy):
        """Fill the parsed topology with deployment info and schedule events.

        Parameters:
            scenario: scenario dict containing the "events" to schedule.
            info_deploy: deployment info with "topology" and "hosts" keys.
        """
        logger.info("Scheduling events")
        self.scenario = Scenario(None, None, None)
        self.scenario.parse(scenario)
        info_topology = info_deploy.get("topology")
        info_hosts = info_deploy.get("hosts")
        topo = self.scenario.get_topology()
        # Merge runtime deployment details into the declared topology.
        topo.fill_config(info_topology)
        topo.fill_hosts_config(info_hosts)
        self.topology = topo
        self.config_plugins()
        events = scenario.get("events")
        self.schedule_plugins(events)

    async def run(self, request):
        """Handle a config request: deploy the topology and schedule events.

        Returns:
            Report whose `status` carries serialized deployment/events
            info on success (left empty on failure).
        """
        logger.info("Running config request")
        report = Report(id=request.id)
        request_scenario = request.scenario
        # logger.debug(f"Received scenario: {request_scenario}")
        scenario = self.parse_bytes(request_scenario)
        if scenario:
            topology = scenario.get("topology")
            address = scenario.get("entrypoint")
            ack, topo_info = await self.call_scenario(
                request.id, "start", topology, address)
            if ack:
                # NOTE(review): call_events returns None, so 'events' in the
                # status below is always None — confirm intended.
                events_info = await self.call_events(scenario, topo_info)
                status_info = {
                    'topology': topo_info,
                    'events': events_info,
                }
                status_bytes = self.serialize_bytes(status_info)
                report.status = status_bytes
            else:
                # Roll back whatever was partially deployed.
                ack, topo_info = await self.call_scenario(
                    request.id, "stop", {}, address)
        return report
class Operator:
    """Orchestrates scenario deployment plus agent/monitor/environment events.

    Extends the basic deployment flow with an environment-event handler,
    an agent plugin registry (agent name -> "ip:port"), and fan-out of
    agent, monitor, and environment events after a successful deploy.
    """

    def __init__(self, info):
        self.info = info
        self.scenario = None        # parsed Scenario instance (set in call_events)
        self.topology = None        # topology filled with deployment info
        self.events_fabric = FabricEvents()
        self.events_env = EnvEventHandler()
        self.plugins = {}           # plugin name -> event handler
        self.agent_plugin = {}      # agent name -> "ip:port" of umbra-agent

    def parse_bytes(self, msg):
        """Decode a utf32-encoded JSON bytes payload into a dict.

        Returns {} when msg is not a bytes object.
        """
        msg_dict = {}
        if isinstance(msg, bytes):
            msg_str = msg.decode('utf32')
            msg_dict = json.loads(msg_str)
        return msg_dict

    def serialize_bytes(self, msg):
        """Encode a dict as utf32 JSON bytes.

        Returns b'' when msg is not a dict.
        """
        msg_bytes = b''
        if isinstance(msg, dict):
            msg_str = json.dumps(msg)
            msg_bytes = msg_str.encode('utf32')
        return msg_bytes

    def config_agent(self, deployed_topo, scenario):
        """Register agents from `scenario` with their deployed IP:PORT.

        Arguments:
            deployed_topo {dict} -- deployed topology from umbra-scenario
            scenario {dict} -- the user-defined scenario
        """
        logger.info("Configuring umbra-agent plugin")
        umbra_topo = scenario.get("umbra").get("topology")
        agents = umbra_topo.get("agents")
        deployed_hosts = deployed_topo.get("topology").get("hosts")
        for hostname, host_val in deployed_hosts.items():
            # tiny hack: e.g. umbraagent.example.com, strip the ".example.com"
            subdomain = hostname.split('.')[0]
            if subdomain in agents.keys():
                agent_ip = host_val.get("host_ip")
                self.agent_plugin[subdomain] = agent_ip + ":" + str(AGENT_PORT)
                logger.info("Added agent: agent_name = %s, at %s:%s",
                            subdomain, agent_ip, AGENT_PORT)

    async def call_scenario(self, test, command, topology, address):
        """Run `command` ("start"/"stop") for `topology` on the scenario service.

        Returns:
            (ack, info) — ack is True on success; info is the parsed
            deployment info dict ({} on failure).
        """
        logger.info(f"Deploying Scenario - {command}")
        scenario = self.serialize_bytes(topology)
        deploy = Workflow(id=test, command=command, scenario=scenario)
        deploy.timestamp.FromDatetime(datetime.now())
        host, port = address.split(":")
        channel = Channel(host, port)
        stub = ScenarioStub(channel)
        status = await stub.Establish(deploy)
        # Fix: `info` must exist on the error path too; previously it was
        # bound only in the success branch, so a deployment error raised
        # NameError at `return ack, info` instead of reporting the failure.
        info = {}
        if status.error:
            ack = False
            logger.info(f'Scenario not deployed error: {status.error}')
        else:
            ack = True
            logger.info(f'Scenario deployed: {status.ok}')
            info = self.parse_bytes(status.info)
        channel.close()
        return ack, info

    def config_plugins(self):
        """Enable the plugins declared by the topology's umbra configs."""
        logger.info("Configuring Umbra plugins")
        umbra_cfgs = self.topology.umbra
        plugin = umbra_cfgs.get("plugin")
        if plugin == "fabric":
            logger.info("Configuring Fabric plugin")
            topology = umbra_cfgs.get("topology")
            configtx = umbra_cfgs.get("configtx")
            configsdk = umbra_cfgs.get("configsdk")
            chaincode = umbra_cfgs.get("chaincode")
            ack_fabric = self.events_fabric.config(
                topology, configsdk, chaincode, configtx)
            if ack_fabric:
                self.plugins["fabric"] = self.events_fabric

    def schedule_plugins(self, events):
        """Hand the scenario events to every configured plugin."""
        for name, plugin in self.plugins.items():
            logger.info("Scheduling plugin %s events", name)
            plugin.schedule(events)

    async def call_events(self, scenario, info_deploy):
        """Fill the parsed topology with deployment info and schedule fabric events."""
        logger.info("Scheduling events")
        self.scenario = Scenario(None, None, None)
        self.scenario.parse(scenario)
        info_topology = info_deploy.get("topology")
        info_hosts = info_deploy.get("hosts")
        topo = self.scenario.get_topology()
        topo.fill_config(info_topology)
        topo.fill_hosts_config(info_hosts)
        self.topology = topo
        logger.debug("DOT: %s", self.topology.to_dot())
        self.config_plugins()
        events = scenario.get("events_fabric")
        self.schedule_plugins(events)

    def config_env_event(self, wflow_id):
        """Point the environment event handler at the umbra-scenario endpoint."""
        self.events_env.config(
            self.scenario.entrypoint.get("umbra-scenario"), wflow_id)
        self.plugins["environment"] = self.events_env

    async def call_env_event(self, wflow_id, scenario):
        """Schedule environment events; refresh topology from query results."""
        logger.info("Scheduling environment events...")
        self.config_env_event(wflow_id)
        env_events = scenario.get("events_others").get("environment")
        # Any better way to get the id of event=current_topology?
        # Need it as the key to the 'result' dict which has
        # the response of the query for current topology
        curr_topo_id = None
        for event in env_events:
            if event["command"] == "current_topology":
                curr_topo_id = event["id"]
        result = await self.events_env.handle(env_events)
        # BUG: what if you have > 1 current_topology events? Above
        # `await` will block until you receive results from all tasks.
        # Correct behavior would be to straightaway update topology
        # after querying topology from umbra-scenario.
        # Update the topology with the newly received topology.
        if curr_topo_id:
            topo = self.scenario.get_topology()
            updated_topo = result[curr_topo_id][1].get("topology")
            updated_host = result[curr_topo_id][1].get("hosts")
            topo.fill_config(updated_topo)
            topo.fill_hosts_config(updated_host)
            self.topology = topo
            logger.debug("DOT: %s", self.topology.to_dot())
        return result

    async def call_agent_event(self, scenario):
        """Send all agent actions to the registered umbra-agent via gRPC."""
        logger.info("Scheduling agent events...")
        agent_events = scenario.get("events_others").get("agent")
        # '[0]' because we assume only single agent exist, thus all
        # events should have the same "agent_name"
        agent_name = agent_events[0].get("agent_name")
        # extract all the actions from agent_events to
        # construct the Instruction message
        agent_actions = []
        for ev in agent_events:
            for action in ev.get("actions"):
                agent_actions.append(action)
        instr_dict = {"id": scenario.get("id"), "actions": agent_actions}
        ip, port = self.agent_plugin[agent_name].split(':')
        channel = Channel(ip, int(port))
        stub = AgentStub(channel)
        instruction = json_format.ParseDict(instr_dict, Instruction())
        reply = await stub.Probe(instruction)
        channel.close()

    async def call_monitor_event(self, scenario):
        """Send all monitor actions to the umbra-monitor service via gRPC."""
        logger.info("Scheduling monitor events...")
        monitor_events = scenario.get("events_others").get("monitor")
        # extract all the actions from monitor_events to
        # construct the Instruction message
        monitor_actions = []
        for ev in monitor_events:
            for action in ev.get("actions"):
                monitor_actions.append(action)
        instr_dict = {"id": scenario.get("id"), "actions": monitor_actions}
        ip, port = self.scenario.entrypoint.get("umbra-monitor").split(':')
        channel = Channel(ip, int(port))
        stub = MonitorStub(channel)
        instruction = json_format.ParseDict(instr_dict, Instruction())
        reply = await stub.Listen(instruction)
        channel.close()

    async def run(self, request):
        """Handle a config request: deploy, then fan out all event types.

        Returns:
            Report whose `status` carries serialized deployment/events
            info on success (left empty on failure).
        """
        logger.info("Running config request")
        report = Report(id=request.id)
        request_scenario = request.scenario
        # logger.debug(f"Received scenario: {request_scenario}")
        scenario = self.parse_bytes(request_scenario)
        if scenario:
            topology = scenario.get("topology")
            address = scenario.get("entrypoint").get("umbra-scenario")
            ack, topo_info = await self.call_scenario(
                request.id, "start", topology, address)
            self.config_agent(topo_info, topology)
            if ack:
                events_info = await self.call_events(scenario, topo_info)
                status_info = {
                    'topology': topo_info,
                    'events': events_info,
                }
                status_bytes = self.serialize_bytes(status_info)
                report.status = status_bytes
                # Agent/monitor/environment events run concurrently.
                await asyncio.gather(self.call_agent_event(scenario),
                                     self.call_monitor_event(scenario),
                                     self.call_env_event(request.id, scenario))
            else:
                # Roll back whatever was partially deployed.
                ack, topo_info = await self.call_scenario(
                    request.id, "stop", {}, address)
        return report
def build_simple_fabric_cfg():
    """Build and save the Fabric-Simple-01 scenario configuration.

    Declares a 4-org Fabric topology (peers, CAs, solo orderer), wires
    it through one network switch, attaches resource profiles, defines
    the fabric lifecycle events (channel create/join, chaincode
    install/instantiate/invoke/query), and writes the scenario to disk.
    """
    temp_dir = "./fabric_configs"
    configs_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), temp_dir))
    temp_dir = "./chaincode"
    chaincode_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), temp_dir))

    # Defines Fabric Topology - main class to have orgs/peers/cas/orderers added
    fab_topo = FabricTopology('fabric_simple', configs_dir, chaincode_dir)

    # Defines scenario containing topology, so events can be added
    entrypoint = "172.17.0.1:8988"
    scenario = Scenario(id="Fabric-Simple-01", entrypoint=entrypoint,
                        folder=configs_dir)
    scenario.set_topology(fab_topo)

    domain = "example.com"
    image_tag = "latest.1"
    ca_tag = "latest.1"
    # org1/org2 run two peers each; org3/org4 a single anchor peer.
    fab_topo.add_org("org1", domain, policies=org1_policy)
    fab_topo.add_peer("peer0", "org1", anchor=True, image_tag=image_tag)
    fab_topo.add_peer("peer1", "org1", image_tag=image_tag)
    fab_topo.add_org("org2", domain, policies=org2_policy)
    fab_topo.add_peer("peer0", "org2", anchor=True, image_tag=image_tag)
    fab_topo.add_peer("peer1", "org2", image_tag=image_tag)
    fab_topo.add_org("org3", domain, policies=org3_policy)
    fab_topo.add_peer("peer0", "org3", anchor=True, image_tag=image_tag)
    fab_topo.add_org("org4", domain, policies=org4_policy)
    fab_topo.add_peer("peer0", "org4", anchor=True, image_tag=image_tag)

    ord_specs = [
        {
            "Hostname": "orderer",
            "SANS": ["localhost"],
        },
    ]
    fab_topo.add_orderer("orderer", domain, mode="solo", specs=ord_specs,
                         policies=orderer_policy, image_tag=image_tag)
    # One CA per org, all sharing the same (placeholder) admin credentials.
    fab_topo.add_ca("ca", "org1", domain, "admin", "admin_pw", image_tag=ca_tag)
    fab_topo.add_ca("ca", "org2", domain, "admin", "admin_pw", image_tag=ca_tag)
    fab_topo.add_ca("ca", "org3", domain, "admin", "admin_pw", image_tag=ca_tag)
    fab_topo.add_ca("ca", "org4", domain, "admin", "admin_pw", image_tag=ca_tag)

    # Configtx quick fixes - checks which paths from configtx needs to have full org desc
    fab_topo.configtx(configtx)
    p1 = "TwoOrgsOrdererGenesis.Consortiums.SampleConsortium.Organizations"
    p2 = "TwoOrgsOrdererGenesis.Orderer.Organizations"
    p3 = "TwoOrgsChannel.Application.Organizations"
    fab_topo.set_configtx_profile(p1, ["org1", "org2", "org3", "org4"])
    fab_topo.set_configtx_profile(p2, ["orderer"])
    fab_topo.set_configtx_profile(p3, ["org1", "org2", "org3", "org4"])

    # Creates all config files - i.e., crypto-config configtx config-sdk
    fab_topo.build_configs()

    # Creates the network topology - orgs/nodes and links
    fab_topo.add_network("s0")
    fab_topo.add_org_network_link("org1", "s0", "E-Line")
    fab_topo.add_org_network_link("org2", "s0", "E-Line")
    fab_topo.add_org_network_link("org3", "s0", "E-Line")
    fab_topo.add_org_network_link("org4", "s0", "E-Line")
    fab_topo.add_org_network_link("orderer", "s0", "E-Line")

    # Defines resources for nodes and links
    node_resources = fab_topo.create_node_profile(cpus=1, memory=1024, disk=None)
    link_resources = fab_topo.create_link_profile(bw=1, delay='2ms', loss=None)
    fab_topo.add_node_profile(node_resources, node_type="container")
    fab_topo.add_link_profile(link_resources, link_type="E-Line")

    # topo_built = fab_topo.build()
    # print(topo_built)
    # fab_topo.show()

    ev_create_channel = {
        "action": "create_channel",
        "org": "org1",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "profile": "TwoOrgsChannel",
    }
    ev_join_channel_org1 = {
        "action": "join_channel",
        "org": "org1",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "peers": ["peer0", "peer1"],
    }
    ev_join_channel_org2 = {
        "action": "join_channel",
        "org": "org2",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "peers": ["peer0", "peer1"],
    }
    ev_join_channel_org3 = {
        "action": "join_channel",
        "org": "org3",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "peers": ["peer0"],
    }
    ev_join_channel_org4 = {
        "action": "join_channel",
        "org": "org4",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "peers": ["peer0"],
    }
    ev_info_channel = {
        "action": "info_channel",
        "org": "org1",
        "user": "******",
        "channel": "testchannel",
        "peers": ["peer0"],
    }
    ev_info_channel_config = {
        "action": "info_channel_config",
        "org": "org1",
        "user": "******",
        "channel": "testchannel",
        "peers": ["peer0"],
    }
    ev_info_channels = {
        "action": "info_channels",
        "org": "org1",
        "user": "******",
        "peers": ["peer0"],
    }
    ev_info_network = {
        "action": "info_network",
        "orderer": "orderer",
    }
    ev_chaincode_install_org1 = {
        "action": "chaincode_install",
        "org": "org1",
        "user": "******",
        "chaincode_name": "example_cc",
        "chaincode_path": "github.com/example_cc",
        "chaincode_version": "v1.0",
        "peers": ["peer0", "peer1"],
    }
    ev_chaincode_install_org2 = {
        "action": "chaincode_install",
        "org": "org2",
        "user": "******",
        "chaincode_name": "example_cc",
        "chaincode_path": "github.com/example_cc",
        "chaincode_version": "v1.0",
        "peers": ["peer0", "peer1"],
    }
    ev_chaincode_instantiate_org1 = {
        "action": "chaincode_instantiate",
        "org": "org1",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ['a', '200', 'b', '50'],
        "chaincode_version": "v1.0",
    }
    ev_chaincode_instantiate_org2 = {
        "action": "chaincode_instantiate",
        "org": "org2",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ['a', '200', 'b', '50'],
        "chaincode_version": "v1.0",
    }
    ev_chaincode_invoke_org1 = {
        "action": "chaincode_invoke",
        "org": "org1",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ['a', 'b', '100'],
    }
    # NOTE(review): ev_chaincode_query_org1 targets "org2" — confirm the
    # name/org mismatch is intended.
    ev_chaincode_query_org1 = {
        "action": "chaincode_query",
        "org": "org2",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ['b'],
    }
    ev_chaincode_query_org2 = {
        "action": "chaincode_query",
        "org": "org2",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ['b'],
    }

    # Schedule: first tick keys are schedule times, second arg the plugin.
    scenario.add_event("0", "fabric", ev_info_channels)
    scenario.add_event("1", "fabric", ev_create_channel)
    scenario.add_event("3", "fabric", ev_join_channel_org1)
    scenario.add_event("3", "fabric", ev_join_channel_org2)
    scenario.add_event("3", "fabric", ev_join_channel_org3)
    scenario.add_event("3", "fabric", ev_join_channel_org4)
    scenario.add_event("4", "fabric", ev_info_channel)
    scenario.add_event("5", "fabric", ev_info_channel_config)
    scenario.add_event("6", "fabric", ev_info_channels)
    scenario.add_event("7", "fabric", ev_info_network)
    scenario.add_event("8", "fabric", ev_chaincode_install_org1)
    scenario.add_event("8", "fabric", ev_chaincode_install_org2)
    scenario.add_event("10", "fabric", ev_chaincode_instantiate_org1)
    scenario.add_event("10", "fabric", ev_chaincode_instantiate_org2)
    scenario.add_event("20", "fabric", ev_chaincode_invoke_org1)
    scenario.add_event("30", "fabric", ev_chaincode_query_org1)
    scenario.add_event("32", "fabric", ev_chaincode_query_org2)

    # Save config file
    scenario.save()
def build_simple_fabric_cfg():
    """Build and save the Fabric-Simple-01 scenario configuration.

    Extended variant: besides the 4-org Fabric topology and fabric
    lifecycle events, it adds an umbra-agent node plus agent, monitor,
    and environment events (container kill, cpu/mem limits, link
    updates, topology query), then saves the scenario.
    """
    temp_dir = "./fabric_configs"
    configs_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), temp_dir))
    temp_dir = "./chaincode"
    chaincode_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), temp_dir))

    # Defines Fabric Topology - main class to have orgs/peers/cas/orderers added
    fab_topo = FabricTopology('fabric_simple', configs_dir, chaincode_dir)

    # Defines scenario containing topology, so events can be added
    # NOTE: these addr are also hardcoded at run.sh
    entrypoint = {
        "umbra-scenario": "172.17.0.1:8988",
        "umbra-monitor": "172.17.0.1:8990"
    }
    scenario = Scenario(id="Fabric-Simple-01", entrypoint=entrypoint,
                        folder=configs_dir)
    scenario.set_topology(fab_topo)

    domain = "example.com"
    image_tag = "1.4.0.1"
    # org1/org2 run two peers each; org3/org4 a single anchor peer.
    fab_topo.add_org("org1", domain, policies=org1_policy)
    fab_topo.add_peer("peer0", "org1", anchor=True, image_tag=image_tag)
    fab_topo.add_peer("peer1", "org1", image_tag=image_tag)
    fab_topo.add_org("org2", domain, policies=org2_policy)
    fab_topo.add_peer("peer0", "org2", anchor=True, image_tag=image_tag)
    fab_topo.add_peer("peer1", "org2", image_tag=image_tag)
    fab_topo.add_org("org3", domain, policies=org3_policy)
    fab_topo.add_peer("peer0", "org3", anchor=True, image_tag=image_tag)
    fab_topo.add_org("org4", domain, policies=org4_policy)
    fab_topo.add_peer("peer0", "org4", anchor=True, image_tag=image_tag)

    # Agent container joins the same domain: umbraagent.example.com
    agent_image = "umbra-agent"
    agent_name = "umbraagent"
    fab_topo.add_agent(agent_name, domain, image=agent_image)

    ord_specs = [
        {
            "Hostname": "orderer"
        },
        {
            "Hostname": "orderer2"
        },
        {
            "Hostname": "orderer3"
        },
        {
            "Hostname": "orderer4"
        },
        {
            "Hostname": "orderer5"
        },
    ]
    fab_topo.add_orderer("orderer", domain, mode="solo", specs=ord_specs,
                         policies=orderer_policy, image_tag=image_tag)
    # One CA per org, all sharing the same (placeholder) admin credentials.
    fab_topo.add_ca("ca", "org1", domain, "admin", "admin_pw",
                    image_tag=image_tag)
    fab_topo.add_ca("ca", "org2", domain, "admin", "admin_pw",
                    image_tag=image_tag)
    fab_topo.add_ca("ca", "org3", domain, "admin", "admin_pw",
                    image_tag=image_tag)
    fab_topo.add_ca("ca", "org4", domain, "admin", "admin_pw",
                    image_tag=image_tag)

    # Configtx quick fixes - checks which paths from configtx needs to have full org desc
    fab_topo.configtx(configtx)
    p1 = "TwoOrgsOrdererGenesis.Consortiums.SampleConsortium.Organizations"
    p2 = "TwoOrgsOrdererGenesis.Orderer.Organizations"
    p3 = "TwoOrgsChannel.Application.Organizations"
    fab_topo.set_configtx_profile(p1, ["org1", "org2", "org3", "org4"])
    fab_topo.set_configtx_profile(p2, ["orderer"])
    fab_topo.set_configtx_profile(p3, ["org1", "org2", "org3", "org4"])

    # Creates all config files - i.e., crypto-config configtx config-sdk
    fab_topo.build_configs()

    # Creates the network topology - orgs/nodes and links
    fab_topo.add_network("s0")
    fab_topo.add_org_network_link("org1", "s0", "E-Line")
    fab_topo.add_org_network_link("org2", "s0", "E-Line")
    fab_topo.add_org_network_link("org3", "s0", "E-Line")
    fab_topo.add_org_network_link("org4", "s0", "E-Line")
    fab_topo.add_org_network_link("orderer", "s0", "E-Line")
    fab_topo.add_org_network_link("umbraagent", "s0", "E-Line")

    # Defines resources for nodes and links
    node_resources = fab_topo.create_node_profile(cpus=1, memory=1024, disk=None)
    link_resources = fab_topo.create_link_profile(bw=1, delay='2ms', loss=None)
    fab_topo.add_node_profile(node_resources, node_type="container")
    fab_topo.add_link_profile(link_resources, link_type="E-Line")

    # topo_built = fab_topo.build()
    # print(topo_built)
    # fab_topo.show()

    ev_create_channel = {
        "action": "create_channel",
        "org": "org1",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "profile": "TwoOrgsChannel",
    }
    ev_join_channel_org1 = {
        "action": "join_channel",
        "org": "org1",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "peers": ["peer0", "peer1"],
    }
    ev_join_channel_org2 = {
        "action": "join_channel",
        "org": "org2",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "peers": ["peer0", "peer1"],
    }
    ev_join_channel_org3 = {
        "action": "join_channel",
        "org": "org3",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "peers": ["peer0"],
    }
    ev_join_channel_org4 = {
        "action": "join_channel",
        "org": "org4",
        "user": "******",
        "orderer": "orderer",
        "channel": "testchannel",
        "peers": ["peer0"],
    }
    ev_info_channel = {
        "action": "info_channel",
        "org": "org1",
        "user": "******",
        "channel": "testchannel",
        "peers": ["peer0"],
    }
    ev_info_channel_config = {
        "action": "info_channel_config",
        "org": "org1",
        "user": "******",
        "channel": "testchannel",
        "peers": ["peer0"],
    }
    ev_info_channels = {
        "action": "info_channels",
        "org": "org1",
        "user": "******",
        "peers": ["peer0"],
    }
    ev_info_network = {
        "action": "info_network",
        "orderer": "orderer",
    }
    ev_chaincode_install_org1 = {
        "action": "chaincode_install",
        "org": "org1",
        "user": "******",
        "chaincode_name": "example_cc",
        "chaincode_path": "github.com/example_cc",
        "chaincode_version": "v1.0",
        "peers": ["peer0", "peer1"],
    }
    ev_chaincode_install_org2 = {
        "action": "chaincode_install",
        "org": "org2",
        "user": "******",
        "chaincode_name": "example_cc",
        "chaincode_path": "github.com/example_cc",
        "chaincode_version": "v1.0",
        "peers": ["peer0", "peer1"],
    }
    ev_chaincode_instantiate_org1 = {
        "action": "chaincode_instantiate",
        "org": "org1",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ['a', '200', 'b', '50'],
        "chaincode_version": "v1.0",
    }
    ev_chaincode_instantiate_org2 = {
        "action": "chaincode_instantiate",
        "org": "org2",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ['a', '200', 'b', '50'],
        "chaincode_version": "v1.0",
    }
    ev_chaincode_invoke_org1 = {
        "action": "chaincode_invoke",
        "org": "org1",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ['a', 'b', '100'],
    }
    ev_chaincode_query_org1 = {
        "action": "chaincode_query",
        "org": "org1",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ['b'],
    }
    ev_chaincode_query_org2 = {
        "action": "chaincode_query",
        "org": "org2",
        "user": "******",
        "peers": ["peer1"],
        "channel": "testchannel",
        "chaincode_name": "example_cc",
        "chaincode_args": ['b'],
    }

    # Environment events: forwarded to umbra-scenario at runtime.
    ev_kill_container_peer0_org2 = {
        "command": "environment_event",
        "target_node": "peer0.org2.example.com",
        "action": "kill_container",
        "action_args": {},
    }
    ev_mem_limit_peer1_org1 = {
        "command": "environment_event",
        "action": "update_memory_limit",
        "action_args": {
            "mem_limit": 256000000,
            "memswap_limit": -1
        },
        "target_node": "peer1.org1.example.com",
    }
    ev_cpu_limit_peer1_org2 = {
        "command": "environment_event",
        "target_node": "peer1.org2.example.com",
        "action": "update_cpu_limit",
        "action_args": {
            "cpu_quota": 10000,
            "cpu_period": 50000,
            "cpu_shares": -1,
            "cores": {}
        },
    }

    """
    $ tc qdisc show dev s0-eth2
    qdisc htb 5: root refcnt 2 r2q 10 default 1 direct_packets_stat 0 direct_qlen 1000
    qdisc netem 10: parent 5:1 limit 1000 delay 4.0ms loss 10%
    $ tc qdisc show dev s0-eth5
    qdisc htb 5: root refcnt 2 r2q 10 default 1 direct_packets_stat 0 direct_qlen 1000
    qdisc netem 10: parent 5:1 limit 1000 delay 4.0ms loss 10%
    """
    ev_update_link_res = {
        "command": "environment_event",
        "action": "update_link",
        "action_args": {
            "events": [
                {
                    "group": "links",
                    "specs": {
                        "action": "update",
                        "online": True,
                        "resources": {
                            "bw": 3,
                            "delay": "4ms",
                            "loss": 10,
                        }
                    },
                    "targets": ("s0", "peer1.org1.example.com")
                },
                {
                    "group": "links",
                    "specs": {
                        "action": "update",
                        "online": True,
                        "resources": {
                            "bw": 3,
                            "delay": "4ms",
                            "loss": 10,
                        }
                    },
                    "targets": ("s0", "peer0.org3.example.com")
                },
            ]
        },
    }

    # Find peer1.org1.example.com connection name from scenario.log
    # In this case, it is s0-eth2
    # $ ip link show s0-eth2  # ensure state DOWN
    # ... mtu 1500 qdisc htb master ovs-system state DOWN
    ev_update_link_peer1_org1_downlink = {
        "command": "environment_event",
        "action": "update_link",
        "action_args": {
            "events": [
                {
                    "group": "links",
                    "specs": {
                        "action": "update",
                        "online": False,
                        "resources": None
                    },
                    "targets": ("s0", "peer1.org1.example.com")
                },
            ]
        },
    }
    ev_update_link_peer1_org1_uplink = {
        "command": "environment_event",
        "action": "update_link",
        "action_args": {
            "events": [
                {
                    "group": "links",
                    "specs": {
                        "action": "update",
                        "online": True,
                        "resources": {
                            "bw": 1,
                            "delay": "2ms",
                            # "loss": None,
                        }
                    },
                    "targets": ("s0", "peer1.org1.example.com")
                },
            ]
        },
    }
    ev_update_link_orderer_down = {
        "command": "environment_event",
        "action": "update_link",
        "action_args": {
            "events": [
                {
                    "group": "links",
                    "specs": {
                        "action": "update",
                        "online": False,
                        "resources": None
                    },
                    "targets": ("s0", "orderer.example.com")
                },
            ]
        },
    }
    ev_update_link_orderer_up = {
        "command": "environment_event",
        "action": "update_link",
        "action_args": {
            "events": [
                {
                    "group": "links",
                    "specs": {
                        "action": "update",
                        "online": True,
                        "resources": {
                            "bw": 1,
                            "delay": "2ms",
                            # "loss": None,
                        }
                    },
                    "targets": ("s0", "orderer.example.com")
                },
            ]
        },
    }

    # Agent event: umbra-agent pings peer0.org1 on a schedule.
    ev_agent_ping_peer0org1 = {
        "agent_name": agent_name,
        "id": "100",
        "actions": [
            {
                'id': "1",
                "tool": "ping",
                "output": {
                    "live": False,
                    "address": None,
                },
                'parameters': {
                    "target": "peer0.org1.example.com",
                    "interval": "1",
                    "duration": "4",
                },
                'schedule': {
                    "from": 1,
                    "until": 0,
                    "duration": 0,
                    "interval": 0,
                    "repeat": 0
                },
            },
        ],
    }
    # Monitor event: umbra-monitor samples the peer0.org1 container.
    ev_monitor_container_peer0org1 = {
        "id": "101",
        "actions": [
            {
                'id': "2",
                "tool": "container",
                "output": {
                    "live": False,
                    "address": None,
                },
                'parameters': {
                    "target": "peer0.org1.example.com",
                    "interval": "1",
                    "duration": "1",
                },
                'schedule': {
                    "from": 2,
                    "until": 0,
                    "duration": 0,
                    "interval": 0,
                    "repeat": 0
                },
            },
        ],
    }
    ev_get_topology = {
        "command": "current_topology",
    }

    # Schedule fabric events (string ticks) and other events (int ticks).
    scenario.add_event_fabric("0", "fabric", ev_info_channels)
    scenario.add_event_fabric("1", "fabric", ev_create_channel)
    scenario.add_event_others(1, "agent", ev_agent_ping_peer0org1)
    scenario.add_event_others(3, "monitor", ev_monitor_container_peer0org1)
    # scenario.add_event_others(3, "environment", ev_kill_container_peer0_org1)
    # scenario.add_event_others(4, "environment", ev_kill_container_peer0_org2)
    # scenario.add_event_others(6, "environment", ev_update_link_res)
    # scenario.add_event_others(3, "environment", ev_update_link_peer1_org1_downlink)
    # scenario.add_event_others(3, "environment", ev_update_link_peer1_org1_uplink)
    # scenario.add_event_others(1, "environment", ev_update_link_orderer_down)
    # scenario.add_event_others(2, "environment", ev_update_link_orderer_up)
    scenario.add_event_others(1, "environment", ev_mem_limit_peer1_org1)
    scenario.add_event_others(1, "environment", ev_cpu_limit_peer1_org2)
    scenario.add_event_others(3, "environment", ev_get_topology)
    scenario.add_event_fabric("3", "fabric", ev_join_channel_org1)
    scenario.add_event_fabric("3", "fabric", ev_join_channel_org2)
    scenario.add_event_fabric("3", "fabric", ev_join_channel_org3)
    scenario.add_event_fabric("3", "fabric", ev_join_channel_org4)
    scenario.add_event_fabric("5", "fabric", ev_info_channel)
    # scenario.add_event_fabric("5", "fabric", ev_info_channel_config)
    scenario.add_event_fabric("9", "fabric", ev_info_channels)
    scenario.add_event_fabric("10", "fabric", ev_info_network)
    scenario.add_event_fabric("11", "fabric", ev_chaincode_install_org1)
    scenario.add_event_fabric("11", "fabric", ev_chaincode_install_org2)
    scenario.add_event_fabric("13", "fabric", ev_chaincode_instantiate_org1)
    scenario.add_event_fabric("13", "fabric", ev_chaincode_instantiate_org2)
    scenario.add_event_fabric("23", "fabric", ev_chaincode_invoke_org1)
    scenario.add_event_fabric("35", "fabric", ev_chaincode_query_org1)
    scenario.add_event_fabric("37", "fabric", ev_chaincode_query_org2)

    # Save config file
    scenario.save()
    fab_topo.show()