def run(number_of_hosts):
    """Benchmark an NXT blockchain on a Mininet star topology.

    Starts one NXT node per host, captures the traffic seen by h1, lets
    every node forge for three minutes, then tears the network down and
    post-processes the process/traffic logs into CSV files and plots.

    Args:
        number_of_hosts: number of peers (and Mininet hosts) in the star.

    Returns:
        str: the "<script_name>_<timestamp>" suffix shared by every
        artifact (node logs, pcap capture, analysis CSVs) of this run.
    """
    setLogLevel('info')
    # Wipe per-host NXT working directories left over by a previous run.
    os.system('sudo rm -r EVAL-NXT-h*')
    NXT_config(number_of_hosts)
    # Each host forges with one of ten rotating secret phrases (0..9).
    passphrases = {
        "h{}".format(i): (i - 1) % 10
        for i in range(1, number_of_hosts + 1)
    }
    unique = time.strftime("%d_%m_%Y_%H:%M", time.localtime())
    script_name = "{}_star_topology_NXT".format(str(number_of_hosts))
    print("{}: started at {}".format(script_name, unique))
    hosts = ["h{}".format(i) for i in range(1, number_of_hosts + 1)]
    topo = StarTopo(number_of_hosts)
    net = Mininet(topo, autoSetMacs=True)
    net.start()
    configNetParams(net, hosts)
    # Only one sniffer: many concurrent tcpdumps misbehave, and on a star
    # topology the traffic is the same on all interfaces anyway.
    net.nameToNode['h1'].cmd(
        "tcpdump -s0 -i h1-eth0 -U -w Captures/traffic_h1_{}_{}.pcap &".format(
            script_name, unique))
    # Give tcpdump time to start before traffic generation; stopping the net
    # later kills the processes still writing the log files.
    time.sleep(2)
    for host in hosts:
        net.nameToNode[host].cmd("cd EVAL-NXT-{}".format(host))
        # "timeout 300" guarantees the node dies even if teardown fails.
        net.nameToNode[host].cmd(
            "nohup timeout 300 ./run.sh > ../Nodes_Logs/log_{}_{}_{} &".format(
                host, script_name, unique))
    # Wait for the nodes to come up before talking to their HTTP API.
    print('Waiting for initialization of NXT nodes')
    time.sleep(20)
    print('Initialization done')
    # Sample CPU/memory of the node processes for the whole run.
    net.nameToNode['h1'].cmd(
        "../Util/monitor_processes.sh ../Proc_Logs/processes_log_{}_{} &".format(
            script_name, unique))
    for host in hosts:
        net.nameToNode[host].cmd(
            "curl --data 'secretPhrase={}' http://{}:6876/nxt?requestType=startForging".format(
                passphrases[host], hostToIp(host)))
    # Leave the nodes some time forging.
    time.sleep(180)
    for host in hosts:
        try:
            net.nameToNode[host].cmd(
                "curl --data 'secretPhrase={}' http://{}:6876/nxt?requestType=stopForging".format(
                    passphrases[host], hostToIp(host)))
        except Exception:
            # Best effort: a dead node must not abort the teardown sequence.
            print("stopForging request to {} failed".format(host))
    ############
    time.sleep(5)
    resetNetParams(net, hosts)
    net.stop()

    proc_log_name = 'processes_log_{}_{}'.format(script_name, unique)
    log_analysis("Proc_Logs/{}".format(proc_log_name), ["java"])
    make_plots('Analysis/analysis_{}.csv'.format(proc_log_name))
    tshark_output_to_CSV(
        'Analysis/traffic_volume_{}_{}.csv'.format(script_name, unique),
        'Captures/traffic_h1_{}_{}.pcap'.format(script_name, unique),
        'websocket')
    return "{}_{}".format(script_name, unique)
def run(number_of_hosts):
    """Run the Ethereum star-topology benchmark with `number_of_hosts` peers.

    Boots a Mininet star network, launches one geth miner per host, sniffs
    the traffic on h1, monitors the geth processes, then stops everything
    and converts the collected logs and capture into CSVs and plots.

    Returns the "<script_name>_<timestamp>" suffix used by every artifact.
    """
    setLogLevel('info')
    eth_nodes = ethereum_config(number_of_hosts)

    def addr_of(h):
        # The account unlocked on host hN lives in the nodeN data dir.
        return eth_nodes["node{}".format(h[1:])]

    stamp = time.strftime("%d_%m_%Y_%H:%M", time.localtime())
    label = "{}_star_topology_ETH".format(number_of_hosts)
    print("{}: started at {}".format(label, stamp))
    hosts = ["h{}".format(n) for n in range(1, number_of_hosts + 1)]
    net = Mininet(StarTopo(number_of_hosts), autoSetMacs=True)
    net.start()
    configNetParams(net, hosts)
    # A single sniffer on h1 suffices: many concurrent tcpdumps misbehave,
    # and on a star every interface sees the same traffic.
    sniffer = net.nameToNode['h1']
    sniffer.cmd(
        "tcpdump -s0 -i h1-eth0 -U -w Captures/traffic_h1_{}_{}.pcap &".format(
            label, stamp))
    # Let tcpdump settle before traffic starts (net.stop() later kills the
    # processes that are still writing the log files).
    time.sleep(2)
    for h in hosts:
        net.nameToNode[h].cmd("cd ethnet")
    geth_cmd = (
        "nohup timeout 300 geth --ipcdisable --datadir node{}/ "
        "--syncmode 'full' --port 30310 --nat extip:{} --http "
        "--http.addr '{}' --http.port 8501 "
        "--http.api 'personal,debug,eth,net,web3,txpool,miner' "
        "--networkid 700 -unlock '{}' --password pwdfile --mine "
        "--miner.gasprice '1' --miner.gaslimit '94000000' "
        "--miner.gastarget '1' --allow-insecure-unlock --nodiscover "
        "> ../Nodes_Logs/log_{}_{}_{} &")
    for h in hosts:
        ip = hostToIp(h)
        net.nameToNode[h].cmd(
            geth_cmd.format(h[1:], ip, ip, addr_of(h), h, label, stamp))
    # Give the geth nodes time to come up.
    print('Waiting for initialization of ETH nodes')
    time.sleep(20)
    print('Inizialization done')
    # Sample CPU/memory of the geth processes for the whole run.
    sniffer.cmd(
        "../Util/monitor_processes.sh ../Proc_Logs/processes_log_{}_{} &".format(
            label, stamp))
    # Leave the miners working undisturbed for three minutes.
    time.sleep(180)
    ############
    time.sleep(5)
    resetNetParams(net, hosts)
    net.stop()
    proc_log = 'processes_log_{}_{}'.format(label, stamp)
    log_analysis("Proc_Logs/{}".format(proc_log), ["geth"])
    make_plots('Analysis/analysis_{}.csv'.format(proc_log))
    tshark_output_to_CSV(
        'Analysis/traffic_volume_{}_{}.csv'.format(label, stamp),
        'Captures/traffic_h1_{}_{}.pcap'.format(label, stamp))
    return "{}_{}".format(label, stamp)
def run(depth, number_of_TX):
    """Benchmark an NXT blockchain on a Mininet binary-tree topology.

    Runs 2**depth NXT peers, lets them forge while bursts of sendMoney
    transactions are injected, measures TPS through one observing client
    per peer, and collects process and traffic statistics.

    Args:
        depth: tree depth; the network has 2**depth NXT peers.
        number_of_TX: total transactions to inject, in bursts of 50.

    Returns:
        list: [mean_TPS, total_traffic] for the run.
    """
    setLogLevel('info')
    # Wipe per-host NXT working directories left over by a previous run.
    os.system('sudo rm -r EVAL-NXT-h*')
    number_of_hosts = 2**depth
    NXT_config(number_of_hosts)
    # you can change passphrases; each host forges with one of ten (0..9).
    passphrases = {
        "h{}".format(i): (i - 1) % 10
        for i in range(1, number_of_hosts + 1)
    }
    unique = time.strftime("%d_%m_%Y_%H:%M", time.localtime())
    script_name = "tree_{}_depth_{}_transactions_NXT".format(
        str(depth), str(number_of_TX))
    print("{}: started at {}".format(script_name, unique))
    hosts = ["h{}".format(i) for i in range(1, number_of_hosts + 1)]
    topo = TreeTopo(depth)
    net = Mininet(topo, autoSetMacs=True)
    net.start()
    # The topology may create more hosts than the NXT peers (clients used
    # below), so net params are configured on every host in the net.
    net_hosts = ["h{}".format(i) for i in range(1, len(net.hosts) + 1)]
    configNetParams(net, net_hosts)
    # Only one sniffer: many concurrent tcpdumps misbehave, and the traffic
    # is the same on all interfaces.
    net.nameToNode['h1'].cmd(
        "tcpdump -s0 -i h1-eth0 -U -w Captures/traffic_h1_{}_{}.pcap &".format(
            script_name, unique))
    # Give tcpdump time to start; stopping the net later kills the
    # processes still writing the log files.
    time.sleep(2)
    for host in hosts:
        net.nameToNode[host].cmd("cd EVAL-NXT-{}".format(host))
        # "timeout 300" guarantees the node dies even if teardown fails.
        net.nameToNode[host].cmd(
            "nohup timeout 300 ./run.sh > ../Nodes_Logs/log_{}_{}_{} &".format(
                host, script_name, unique))
    # Wait for initialization of the nodes before using their HTTP API.
    print('Waiting for initialization of NXT nodes')
    time.sleep(20)
    print('Initialization done')
    # Sample CPU/memory of the node processes for the whole run.
    net.nameToNode['h1'].cmd(
        "../Util/monitor_processes.sh ../Proc_Logs/processes_log_{}_{} &".format(
            script_name, unique))
    for host in hosts:
        net.nameToNode[host].cmd(
            "curl --data 'secretPhrase={}' http://{}:6876/nxt?requestType=startForging".format(
                passphrases[host], hostToIp(host)))
    # Leave the nodes forging while transactions are sent in bursts.
    # Start observing clients, one per NXT peer.
    total_time = 200
    observers = []
    for host in hosts:
        # Fix: use the full numeric suffix host[1:], not host[1] (first
        # digit only), so peers >= h10 don't collide on observer names
        # (e.g. h1 and h10 would otherwise both map to the same client).
        observer = observingClient(
            total_time, "h{}".format(int(host[1:]) + number_of_hosts + 1),
            net, host)
        observers.append(observer)
        observer.start()
    # Generate transactions in bursts of 50.
    tx_burst = 50
    n_bursts = number_of_TX // tx_burst
    # Amounts and fees are expressed in NQT (1 NXT = 10**8 NQT).
    amounts = [i * 10**8 for i in [50, 100, 200]]
    fees = [i * 10**8 for i in [1, 5, 10]]
    start = time.time()
    stop_it = False
    for i in range(n_bursts):
        print("Transaction burst number {}".format(i + 1))
        for j in range(tx_burst):
            # Round-robin the target node of each transaction.
            host_num = j % len(hosts) + 1
            host_IP = hostToIp('h{}'.format(host_num))
            try:
                net.nameToNode['h{}'.format(number_of_hosts + 1)].cmd(
                    "curl -m 1 --data 'recipient=NXT-2543-6FUN-HS5W-BNVW6&secretPhrase={}&deadline=1440&phased=false&phasingHashedSecretAlgorithm=2&feeNQT={}&amountNQT={}' http://{}:6876/nxt?requestType=sendMoney"
                    .format(j % 10, fees[j % 3], amounts[j % 3], host_IP))
            except Exception:
                print("One transaction not sent to h{} due to some exception".
                      format(host_num))
            elapsed = time.time() - start
            if elapsed >= total_time:
                # Observation window is over; stop injecting.
                stop_it = True
                break
        if stop_it:
            break
        time.sleep(total_time // n_bursts)
    for host in hosts:
        try:
            net.nameToNode[host].cmd(
                "curl --data 'secretPhrase={}' http://{}:6876/nxt?requestType=stopForging".format(
                    passphrases[host], hostToIp(host)))
        except Exception:
            # Best effort: a dead node must not abort the teardown.
            pass
    ############
    time.sleep(5)
    try:
        resetNetParams(net, net_hosts)
    except Exception:
        print("resetNetParams failed")
    try:
        net.stop()
    except Exception:
        print("net.stop failed")
    proc_log_name = 'processes_log_{}_{}'.format(script_name, unique)
    log_analysis("Proc_Logs/{}".format(proc_log_name), ["java"])
    make_plots('Analysis/analysis_{}.csv'.format(proc_log_name))
    obs_dict = {obs.observedHost: obs.TPS() for obs in observers}
    export_TPS_to_CSV(
        'Analysis/TPS_measures_{}_{}.csv'.format(script_name, unique),
        obs_dict)
    mean_TPS = sum(observer.TPS() for observer in observers) / len(observers)
    tshark_output_to_CSV(
        'Analysis/traffic_volume_{}_{}.csv'.format(script_name, unique),
        'Captures/traffic_h1_{}_{}.pcap'.format(script_name, unique),
        'websocket')
    total_traffic = tshark_total_traffic(
        'Captures/traffic_h1_{}_{}.pcap'.format(script_name, unique),
        ['udp', 'tcp'])
    return [mean_TPS, total_traffic]
 if test_ID in [0, 2]:
     # Test 2 (test_ID 0 runs every test): transaction benchmark on tree
     # topologies of increasing depth.
     for depth in range(1, m_depth + 1):
         # First row is the origin point (0 TX -> 0 TPS, 0 traffic).
         rows = [[0, 0, 0]]
         for num_tx in tx_range:
             # Clean up any leftover Mininet state before each run.
             os.system("sudo mn -c")
             result = tx_run(depth, num_tx)
             # result presumably is [mean_TPS, total_traffic], matching the
             # run() helpers in this project -- TODO confirm tx_run's contract.
             rows.append([num_tx, result[0], result[1]])
             # Cool-down between consecutive runs.
             time.sleep(100)
         with open(
                 "Analysis/tree_{}_depth_ETH_statistics_{}.csv".format(
                     depth, unique), 'w') as f:
             csvwriter = csv.writer(f)
             csvwriter.writerow(fields)
             csvwriter.writerows(rows)
         make_plots(
             "Analysis/tree_{}_depth_ETH_statistics_{}.csv".format(
                 depth, unique),
             yaxis='ETH')
         # Keep this depth's rows for the final parametric plot (key = #peers).
         data[2**depth] = rows
     make_parametric_plots(
         "Analysis/tree_parametric_ETH_statistics_{}_{}".format(
             m_depth, unique), "Peer(s)", data, fields, 'lower right')
 if test_ID in [0, 3]:
     for depth in range(1, m_depth + 1):
         faultlink_rows = [[0, 0, 0]]
         for num_tx in tx_range:
             os.system("sudo mn -c")
             result = faultlink_tx_run(depth, num_tx)
             faultlink_rows.append([num_tx, result[0], result[1]])
             time.sleep(100)
         with open(
                 "Analysis/faultlink_tree_{}_depth_ETH_statistics_{}.csv"
def run(depth, number_of_TX):
    """Benchmark Ethereum on a tree topology while a link goes down and up.

    Runs 2**depth geth miners on a Mininet tree, injects bursts of
    eth_sendTransaction calls, takes a link down roughly a quarter of the
    way through the observation window and brings it back up at the half,
    measuring TPS and traffic volume throughout.

    Args:
        depth: tree depth; the network has 2**depth geth peers.
        number_of_TX: total transactions to inject, in bursts of 50.

    Returns:
        list: [mean_TPS, total_traffic] for the run.
    """
    setLogLevel('info')
    number_of_hosts = 2**depth
    eth_nodes = ethereum_config(number_of_hosts)

    def host_to_ethAddr(host):
        # The account unlocked on host hN lives in the nodeN data dir.
        return eth_nodes["node{}".format(host[1:])]

    unique = time.strftime("%d_%m_%Y_%H:%M", time.localtime())
    script_name = "tree_{}_depth_{}_transactions_with_link_up_and_down_ETH".format(
        str(depth), str(number_of_TX))
    print("{}: started at {}".format(script_name, unique))
    hosts = ["h{}".format(i) for i in range(1, number_of_hosts + 1)]
    topo = TreeTopo(depth)
    net = Mininet(topo, autoSetMacs=True)
    net.start()
    # The topology may create more hosts than the geth peers (clients used
    # below), so net params are configured on every host in the net.
    net_hosts = ["h{}".format(i) for i in range(1, len(net.hosts) + 1)]
    configNetParams(net, net_hosts)
    # Only one sniffer: many concurrent tcpdumps misbehave, and the traffic
    # is the same on all interfaces.
    net.nameToNode['h1'].cmd(
        "tcpdump -s0 -i h1-eth0 -U -w Captures/traffic_h1_{}_{}.pcap &".format(
            script_name, unique))
    # Give tcpdump time to start; stopping the net later kills the
    # processes still writing the log files.
    time.sleep(2)
    for host in net_hosts:
        net.nameToNode[host].cmd("cd ethnet")
    for host in hosts:
        # "timeout 300" guarantees the node dies even if teardown fails.
        net.nameToNode[host].cmd(
            "nohup timeout 300 geth --ipcdisable --datadir node{}/ --syncmode 'full' --port 30310 --nat extip:{} --http --http.addr '{}' --http.port 8501 --http.api 'personal,debug,eth,net,web3,txpool,miner' --networkid 700 -unlock '{}' --password pwdfile --mine --miner.gasprice '1' --miner.gaslimit '94000000' --miner.gastarget '1' --allow-insecure-unlock --nodiscover > ../Nodes_Logs/log_{}_{}_{} &"
            .format(host[1:], hostToIp(host), hostToIp(host),
                    host_to_ethAddr(host), host, script_name, unique))
    # Wait for the geth nodes to come up before using their HTTP API.
    print('Waiting for initialization of ETH nodes')
    time.sleep(20)
    print('Initialization done')
    # Sample CPU/memory of the geth processes for the whole run.
    net.nameToNode['h1'].cmd(
        "../Util/monitor_processes.sh ../Proc_Logs/processes_log_{}_{} &".format(
            script_name, unique))
    # Start observing clients, one per geth peer.
    total_time = 200
    observers = []
    for host in hosts:
        # Fix: use the full numeric suffix host[1:], not host[1] (first
        # digit only), so peers >= h10 don't collide on observer names
        # (e.g. h1 and h10 would otherwise both map to the same client).
        observer = observingClient(
            total_time, "h{}".format(int(host[1:]) + number_of_hosts + 1),
            net, host)
        observers.append(observer)
        observer.start()
    # Generate transactions in bursts of 50.
    tx_burst = 50
    n_bursts = number_of_TX // tx_burst
    amounts = [50, 100, 200]
    start = time.time()
    elapsed = 0
    stop_it = False
    target = "http://{}:8501"

    def tx_json(sender, receiver):
        # Transaction object: random amount among 50/100/200, hex-encoded
        # as the JSON-RPC API expects.
        return {
            "from": sender,
            "to": receiver,
            "value": hex(amounts[randint(0, 2)])
        }

    def interact_json(sender, receiver):
        # JSON-RPC envelope for eth_sendTransaction with a random id.
        return json.dumps({
            "jsonrpc": "2.0",
            "method": "eth_sendTransaction",
            "params": [tx_json(sender, receiver)],
            "id": randint(10, 1000)
        })

    interaction = """curl -m 2 -X POST -H "Content-Type: application/json" --data '{}' """
    for i in range(n_bursts):
        print("Transaction burst number {}".format(i + 1))
        for j in range(tx_burst):
            # Round-robin the sending node of each transaction.
            host_num = j % len(hosts) + 1
            host_IP = hostToIp('h{}'.format(host_num))
            payload = interaction.format(
                interact_json(host_to_ethAddr("h{}".format(host_num)),
                              "0xfb9a175032adbd79e54258cd1b4ce87f8b17e8aa")
            ) + target.format(host_IP)
            try:
                net.nameToNode['h{}'.format(number_of_hosts + 1)].cmd(payload)
            except Exception:
                print("One transaction not sent to h{} due to some exception".
                      format(host_num))
            elapsed = time.time() - start
            if elapsed >= total_time:
                # Observation window is over; stop injecting.
                stop_it = True
                break
        if stop_it:
            break
        # Link fault injection: down in the second quarter of the window,
        # back up from the half onward.
        if (total_time // 4) <= elapsed < (total_time // 2):
            try:
                configLinkStatus(net, number_of_hosts, "down")
            except Exception:
                print("Config link down failed")
        elif elapsed >= (total_time // 2):
            try:
                configLinkStatus(net, number_of_hosts, "up")
            except Exception:
                print("Config link up failed")
        time.sleep(total_time // n_bursts)
    ############
    time.sleep(5)
    try:
        resetNetParams(net, net_hosts)
    except Exception:
        print("resetNetParams failed")
    try:
        net.stop()
    except Exception:
        print("net.stop failed")
    proc_log_name = 'processes_log_{}_{}'.format(script_name, unique)
    log_analysis("Proc_Logs/{}".format(proc_log_name), ["geth"])
    make_plots('Analysis/analysis_{}.csv'.format(proc_log_name))
    obs_dict = {obs.observedHost: obs.TPS() for obs in observers}
    export_TPS_to_CSV(
        'Analysis/TPS_measures_{}_{}.csv'.format(script_name, unique),
        obs_dict)
    mean_TPS = sum(observer.TPS() for observer in observers) / len(observers)
    tshark_output_to_CSV(
        'Analysis/traffic_volume_{}_{}.csv'.format(script_name, unique),
        'Captures/traffic_h1_{}_{}.pcap'.format(script_name, unique))
    total_traffic = tshark_total_traffic(
        'Captures/traffic_h1_{}_{}.pcap'.format(script_name, unique),
        ['tcp', 'udp'])
    return [mean_TPS, total_traffic]