def stop(self):
    self.hlist.killall("memcached mcperf")
    self.hlist.stop_trafgen()
    self.hlist.stop_mpstat()
    self.log(T.colored("... Waiting for qdiscs to drain", "blue"))
    progress(5)
    self.hlist.remove_qdiscs()
    self.hlist.rmmod_qfq()
    self.hlist.rmmod_eyeq()
    if self.opts("sniffer"):
        self.hsniffer.copy_local(e('', tmpdir=config['SNIFFER_TMPDIR']),
                                 self.opts("exptid") + "-snf",
                                 tmpdir=config['SNIFFER_TMPDIR'])
    self.hlist.copy_by_host(e('logs'), self.opts("outdir") + "/logs",
                            self.opts("exptid"))
    self.hlist.clear_intel_hw_rate_limits(config['NIC_HW_QUEUES'])
    self.hlist.clear_mellanox_hw_rate_limits()

def start_hadoop(self):
    self.master = Host("10.0.1.20")
    self.master.cmd("cd /usr/local/hadoop/conf;" +
                    "cp slaves.tenant1 slaves; cp masters.tenant1 masters;")
    self.start_dnsd()
    self.master.cmd("start-all.sh")
    progress(120)
    dir = self.opts("dir")
    cmd = "mkdir -p %s; cd /usr/local/hadoop; " % dir
    cmd += "hadoop jar hadoop-examples-0.20.205.0.jar "
    self.sid = self.get_sort_id()
    out = os.path.join(dir, "hadoop-progress.txt")
    cmd += "sort random-data sorted-data-%s " % self.sid
    cmd += " > %s 2>&1; " % out
    self.hadoop_job = self.master.cmd_async(cmd)

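
# For reference, with dir="/expt" and a sort id of 3 (both illustrative
# values), the command dispatched by start_hadoop() above expands to:
#
#   mkdir -p /expt; cd /usr/local/hadoop; hadoop jar \
#     hadoop-examples-0.20.205.0.jar sort random-data sorted-data-3 \
#     > /expt/hadoop-progress.txt 2>&1;
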
def stop(self):
    if self.opts("create"):
        return
    if self.opts("destroy"):
        self.hlist.remove_tenants()
        return
    try:
        while 1:
            done = self.check_hadoop_done()
            if done:
                break
            else:
                print "Waiting for hadoop job...", datetime.datetime.now()
                progress(240)
                continue
    except Exception, e:
        print "Hadoop job not found", e

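
# The loop above polls check_hadoop_done() forever; a minimal sketch of the
# same poll-and-wait pattern with an overall deadline (wait_until is
# hypothetical and not used by this script):
def wait_until(predicate, interval=240, timeout=7200):
    """Poll predicate() every `interval` seconds until it returns True or
    `timeout` seconds elapse; returns the last result."""
    import time
    deadline = time.time() + timeout
    done = predicate()
    while not done and time.time() < deadline:
        progress(interval)
        done = predicate()
    return done
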
def start_loadgen(self, out="loadgen.txt", tid=2, traffic=None):
    dir = self.opts("dir")
    out = os.path.join(dir, out)
    LOADGEN = "/root/vimal/exports/loadgen"
    # Start in all hosts
    if traffic is None:
        return
    port = 12345 + tid
    for h in self.hlist.lst:
        ip = h.get_tenant_ip(tid)
        cmd = "mkdir -p %s; " % dir
        cmd += "%s -i %s -vv " % (LOADGEN, ip)
        cmd += " -l %s -p 1000000 -f %s > %s" % (port, traffic, out)
        h.cmd_async(cmd)
    print "Waiting for loadgen to start..."
    progress(20)
    for h in self.hlist.lst:
        ip = h.get_tenant_ip(tid)
        # Probe each loadgen's listen port; wait for the probe to finish
        # instead of leaking the child process.
        p = Popen("nc -nzv %s %s" % (ip, port), shell=True)
        p.wait()
    return

def start_pa_process(self, out="paggr.txt", tid=1, cpu=None, P=1):
    dir = self.opts("dir")
    out = os.path.join(dir, out)
    CLIENT = "/root/vimal/exports/incast_app/l25_tcp_client"
    SERVER = "/root/vimal/exports/incast_app/l25_tcp_server"
    if cpu is None:
        cpu = self.nextcpu
        self.nextcpu = (self.nextcpu + 2) % 8
    for h in self.hlist.lst:
        ip = h.get_tenant_ip(tid)
        cmd = "mkdir -p %s; " % dir
        if args.bind:
            cmd += "taskset -c %s,%s " % (cpu, cpu + 1)
        cmd += " %s %s" % (SERVER, ip)
        h.cmd_async(cmd)
    print "Waiting for servers to start..."
    progress(1)
    print "starting client (tid=%s) on %s..." % (tid, host_ips[0])
    h0 = Host(host_ips[0])
    # Generate input file for client
    size = self.opts("size")
    inpfile = ("~/vimal/exports/incast_app/input/get%s_P%s_tenant%s.dat"
               % (size, P, tid))
    cmd = "python tests/genconfig.py --traffic paggr -n 15 "
    cmd += "-P %s --size %s --repeat %s --tenant %s " % (
        P, size, self.opts("repeat"), tid)
    cmd += "> %s" % inpfile
    Popen(cmd, shell=True).wait()
    # `out` already includes the output directory; joining it with dir again
    # would mangle the path when dir is relative.
    cmd = 'mkdir -p %s; ' % dir
    if args.bind:
        cmd += "taskset -c %s,%s " % (cpu, cpu + 1)
    cmd += " %s %s > %s" % (CLIENT, inpfile, out)
    h0.cmd_async(cmd)
    return

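
# With args.bind set, each call that does not pass an explicit cpu pins its
# processes to two adjacent cores and advances self.nextcpu by 2 mod 8, so
# successive tenants land on cores (0,1), (2,3), (4,5), (6,7) and then wrap.
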
def stop(self):
    if self.opts("create"):
        return
    if self.opts("destroy"):
        self.hlist.remove_tenants()
        return
    start = datetime.datetime.now()
    try:
        while 1:
            done = True
            for i in xrange(self.opts("nhadoop")):
                done = done and self.check_hadoop_done(i)
            if done:
                break
            else:
                print "Waiting for hadoop job(s) start: ", start
                print "                           now: ", datetime.datetime.now()
                try:
                    progress(60)
                except:
                    break
                continue
    except Exception, e:
        print "Hadoop job not found", e

def start_loadgen(self, out="loadgen.txt", tid=2, traffic=None, cpu=None):
    dir = self.opts("dir")
    out = os.path.join(dir, out)
    LOADGEN = "/root/vimal/exports/loadgen"
    # Start in all hosts
    if traffic is None:
        return
    if cpu is None:
        cpu = self.nextcpu
        self.nextcpu = (self.nextcpu + 2) % 8
    port = 12345 + tid
    for h in self.hlist.lst:
        ip = h.get_tenant_ip(tid)
        cmd = "mkdir -p %s; " % dir
        if args.pin:
            cmd += "taskset -c %s,%s " % (cpu, cpu + 1)
        cmd += " %s -i %s -vv " % (LOADGEN, ip)
        cmd += " -l %s -p 1000000 -f %s > %s" % (port, traffic, out)
        h.cmd_async(cmd)
    print "Waiting for loadgen to start..."
    progress(5)
    for h in self.hlist.lst:
        ip = h.get_tenant_ip(tid)
        while 1:
            p = Popen("nc -nzv %s %s" % (ip, port), shell=True,
                      stdout=PIPE, stderr=PIPE)
            stdout, stderr = p.communicate()
            if 'open' in stderr:
                print T.colored("started %s:%s" % (ip, port), "green")
                break
            else:
                print T.colored("  retrying %s:%s" % (ip, port), "yellow")
                sleep(0.5)
    return

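
# The `nc -nzv` probe above depends on the netcat variant printing "open" on
# stderr. A minimal, netcat-free sketch of the same readiness check using the
# standard socket module (wait_for_port is hypothetical, not part of this
# codebase; socket and sleep are already imported by this module):
def wait_for_port(ip, port, interval=0.5):
    """Block until a TCP connect() to (ip, port) succeeds."""
    while True:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((ip, port))
            return
        except socket.error:
            sleep(interval)
        finally:
            s.close()
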
def start(self):
    sniffer = self.opts("sniffer")
    hservers = HostList()
    hclients = HostList()
    hlist = HostList()
    if sniffer:
        hsniffer = Host(sniffer)

    self.log(T.colored("Servers:---", "green"))
    for ip in self.opts("servers"):
        h = Host(ip)
        hservers.append(h)
        hlist.append(h)
        self.log(T.colored(ip, "green"))

    self.log(T.colored("Clients:---", "yellow"))
    for ip in self.opts("clients"):
        h = Host(ip)
        hclients.append(h)
        hlist.append(h)
        self.log(T.colored(ip, "yellow"))

    # Reset/clear state on servers and clients
    hlist.rmrf(e(""))
    hlist.mkdir(e("logs"))

    # Log the servers and clients used for the experiment
    local_cmd("mkdir -p %s/logs" % self.opts("outdir"))
    hostsfile = "%s/logs/hostsfile.txt" % self.opts("outdir")
    hostsfd = open(hostsfile, 'w')
    hostsfd.write("Servers:\n")
    for ip in self.opts("servers"):
        hostsfd.write(" " + ip + "\n")
    hostsfd.write("Clients:\n")
    for ip in self.opts("clients"):
        hostsfd.write(" " + ip + "\n")
    hostsfd.close()

    if sniffer:
        hsniffer.rmrf(e("", tmpdir=config['SNIFFER_TMPDIR']))
        hsniffer.mkdir(e("logs", tmpdir=config['SNIFFER_TMPDIR']))
        hsniffer.cmd("killall -9 %s" % config['SNIFFER'])

    hlist.rmmod()
    hlist.stop_trafgen()
    hlist.cmd("sudo service memcached stop")
    hlist.killall("udp memcached mcperf")
    hlist.remove_qdiscs()
    hlist.clear_intel_hw_rate_limits(config['NIC_HW_QUEUES'])
    hlist.clear_mellanox_hw_rate_limits()
    sleep(1)
    hlist.configure_tcp_limit_output_bytes()

    # Find available CPU cores for all tenants
    start_port = self.opts("startport")
    avail_cpus = [x for x in xrange(0, config['NUM_CPUS'])
                  if x not in config['EXCLUDE_CPUS']]

    # Setup interrupt affinity: configure interrupts to only be sent to the
    # respective CPU cores to which the tenants are pinned
    if self.opts("mctenants") + 2 * self.opts("trafgentenants") > len(avail_cpus):
        tenant_cpus = avail_cpus
        self.log(T.colored("WARNING: Multiple tenants sharing CPU cores", "red"))
    else:
        tenant_cpus = avail_cpus[:self.opts("mctenants") +
                                 2 * self.opts("trafgentenants")]
    hlist.configure_iface_interrupt_affinity(tenant_cpus)

    # NOTE: Tenant to CPU core mapping:
    # -- First 'mctenants' CPU cores are used to pin memcached or mcperf
    #    instances on servers and clients respectively.
    # -- Next 'trafgentenants' CPU cores are used to bind trafgen sink
    #    processes for each tenant on each host.
    # -- Last 'trafgentenants' CPU cores are used to bind trafgen generator
    #    processes for each tenant on each host.
    # So, each trafgen tenant requires 2 CPU cores (1 for sink and 1 for
    # generator).

    # Start memcached on servers - one instance for each tenant, pinned to a
    # different CPU core
    assigned_cpus = 0
    for tenant in xrange(0, self.opts("mctenants")):
        self.start_memcached(hservers, mem=2048, port=start_port + tenant,
                             threads=1,
                             cpus=[avail_cpus[assigned_cpus % len(avail_cpus)]])
        assigned_cpus += 1

    # Start trafgen servers/sinks - one instance for each tenant, pinned to
    # a different CPU core on each host
    for tenant in xrange(0, self.opts("trafgentenants")):
        tenant_id = "%d_%d" % (tenant, self.opts("trafgentenants"))
        self.start_trafgen_server(hlist, proto=self.opts("trafgenproto"),
                                  tenant_id=tenant_id,
                                  port=start_port + 1000 + tenant,
                                  cpus=[avail_cpus[assigned_cpus % len(avail_cpus)]],
                                  dir=e('logs'))
        assigned_cpus += 1

    # If mcworkload=get, first run mcperf with set requests to fill up the
    # cache. For each (tenant, server) pair, create a separate mcperf
    # instance on each client.
if self.opts("mcworkload") == "get": tmp_assigned_cpus = 0 hlist.mkdir(e("logs_unused")) for tenant in xrange(0, self.opts("mctenants")): for (srv_id, hserver) in enumerate(hservers.lst): server_ip = socket.gethostbyname(hserver.hostname()) for (cli_id, hclient) in enumerate(hclients.lst): # Index of tenant and client connecting to this # particular server for this tenant. tenant_id = "%d_%d" % (tenant, self.opts("mctenants")) client_id = "%d_%d" % (cli_id, len(hclients.lst)) # We use the traffic class number as the seed for the # random number generator so that all instances # generate unique random number sequences. seed = (start_port + (tenant * len(hservers.lst) * len(hclients.lst)) + (srv_id * len(hclients.lst)) + (cli_id)) self.start_mcperf(hclient, server_ip, tenant_id, client_id, seed = seed, port = start_port + tenant, time = MC_PREPOPULATE_TIME, nconn = self.opts("mcnconn"), mcrate = 5000, mcexp = self.opts("mcexp"), workload = "set", mcsize = self.opts("mcsize"), cpus = [avail_cpus[tmp_assigned_cpus % len(avail_cpus)]], dir=e('logs_unused')) tmp_assigned_cpus += 1 self.log(T.colored("Populating caches first", "blue")) progress(MC_PREPOPULATE_TIME + 5) # Configure rate limits # mcperf tenants: # On server, configure separate rate limit to each mctenant's client # On client, configure separate rate limit to each mctenant's server # trafgen tenants: # On each host, configure separate rate limits for traffic to each # other host, for each trafgentenant trafgen_pair_rate = (0 if not self.opts("trafgentenants") else (self.opts("trafgen_total_rate") * 1.0 / (self.opts("trafgentenants") * (len(hlist.lst) - 1)))) # Client to server traffic mc_pair_rate_client = (0 if not self.opts("mctenants") else (self.opts("mc_total_rate_client") * 1.0 / (self.opts("mctenants") * len(hservers.lst)))) # Server to client traffic mc_pair_rate_server = (0 if not self.opts("mctenants") else (self.opts("mc_total_rate_server") * 1.0 / (self.opts("mctenants") * len(hclients.lst)))) self.log(T.colored("Pair rate mc client = %s" % mc_pair_rate_client, "blue")) self.log(T.colored("Pair rate mc server = %s" % mc_pair_rate_server, "blue")) self.log(T.colored("Trafgen pair rate = %s" % trafgen_pair_rate, "blue")) if self.opts("rl") == "htb": hlist.mc_add_htb_qdisc(self.opts("htb_mtu")) elif self.opts("rl") == "qfq": hlist.mc_add_qfq_qdisc(self.opts("mtu")) elif self.opts("rl") == "eyeq": hlist.mc_add_htb_qdisc(self.opts("mtu"), True) # Qdisc classes # class 1 : default class # Separate class for each (mctenant, srv_id, cli_id) tuple # (start_port + # (tenant * num_servers * num_clients) + # (srv_id * num_clients) + # (cli_id)) : On client, this represents traffic to srv_id for tenant # On server, this represents traffic to cli_id for tenant # Separate class for each (trafgentenant, hsrc, hdst) tuple # (start_port + 10000 + # (tenant * num_hosts * (num_hosts - 1)) + # (src_id * (num_hosts - 1)) + # (dst_id)) : Traffic from src_host to dst_host for each tenant for tenant in xrange(0, self.opts("mctenants")): for (srv_id, hserver) in enumerate(hservers.lst): server_ip = socket.gethostbyname(hserver.hostname()) for (cli_id, hclient) in enumerate(hclients.lst): client_ip = socket.gethostbyname(hclient.hostname()) srv_port = start_port + tenant rate_str_client = '%.3fMbit' % mc_pair_rate_client rate_str_server = '%.3fMbit' % mc_pair_rate_server klass = (start_port + (tenant * len(hservers.lst) * len(hclients.lst)) + (srv_id * len(hclients.lst)) + (cli_id)) if self.opts("rl") in ["htb", "eyeq"]: 
                    hclient.mc_add_htb_class(rate=rate_str_client,
                                             ceil=rate_str_client, klass=klass,
                                             htb_mtu=self.opts("htb_mtu"))
                    hserver.mc_add_htb_class(rate=rate_str_server,
                                             ceil=rate_str_server, klass=klass,
                                             htb_mtu=self.opts("htb_mtu"))
                elif self.opts("rl") == "qfq":
                    hclient.mc_add_qfq_class(rate=mc_pair_rate_client,
                                             klass=klass, mtu=self.opts("mtu"))
                    hserver.mc_add_qfq_class(rate=mc_pair_rate_server,
                                             klass=klass, mtu=self.opts("mtu"))
                if self.opts("rl") in ["htb", "eyeq", "qfq"]:
                    # Client -> Server traffic
                    hclient.mc_add_qdisc_filter(server_ip, sport=0,
                                                dport=srv_port, klass=klass)
                    # Server -> Client traffic
                    hserver.mc_add_qdisc_filter(client_ip, sport=srv_port,
                                                dport=0, klass=klass)

    for tenant in xrange(0, self.opts("trafgentenants")):
        trafgen_port = start_port + 1000 + tenant
        for (src_id, hsrc) in enumerate(hlist.lst):
            src_ip = socket.gethostbyname(hsrc.hostname())
            for (dst_id, hdst) in enumerate(hlist.lst):
                if hsrc == hdst:
                    continue
                # NOTE: Some klass IDs would be unused (when src_id ==
                # dst_id).
                # NOTE: Trafgen server -> client traffic (only ACKs) is
                # allocated a rate limit of only 5Mbit.
                dst_ip = socket.gethostbyname(hdst.hostname())
                rate_str = '%.3fMbit' % trafgen_pair_rate
                rate_str_acks = '5Mbit'
                klass = (start_port + 10000 +
                         (tenant * len(hlist.lst) * len(hlist.lst)) +
                         (src_id * len(hlist.lst)) +
                         (dst_id))
                if self.opts("rl") in ["htb", "eyeq"]:
                    hsrc.mc_add_htb_class(rate=rate_str, ceil=rate_str,
                                          klass=klass,
                                          htb_mtu=self.opts("htb_mtu"))
                    hdst.mc_add_htb_class(rate=rate_str_acks,
                                          ceil=rate_str_acks, klass=klass,
                                          htb_mtu=self.opts("htb_mtu"))
                elif self.opts("rl") == "qfq":
                    hsrc.mc_add_qfq_class(rate=trafgen_pair_rate, klass=klass,
                                          mtu=self.opts("mtu"))
                    hdst.mc_add_qfq_class(rate=5, klass=klass,
                                          mtu=self.opts("mtu"))
                if self.opts("rl") in ["htb", "eyeq", "qfq"]:
                    # Trafgen client -> server traffic filter
                    hsrc.mc_add_qdisc_filter(dst_ip, sport=0,
                                             dport=trafgen_port, klass=klass)
                    # Trafgen server -> client traffic filter (only affects
                    # ACKs)
                    hdst.mc_add_qdisc_filter(src_ip, sport=trafgen_port,
                                             dport=0, klass=klass)

    hlist.start_bw_monitor(e('logs'))
    hlist.start_mpstat(e('logs'))
    hlist.start_perf_monitor(e('logs'), self.opts("t"))
    hlist.set_mtu(self.opts("mtu"))
    if sniffer:
        hsniffer.start_sniffer_delayed(e('logs', tmpdir=config['SNIFFER_TMPDIR']),
                                       board=0, delay=config['SNIFFER_DELAY'],
                                       duration=config['SNIFFER_DURATION'])
    sleep(1)

    # Start trafgen clients to generate background all-to-all traffic.
    # For each (tenant, destination) pair, create a separate trafgen
    # instance on the source host. This is required since trafgen currently
    # only supports a single destination per instance.
    for tenant in xrange(0, self.opts("trafgentenants")):
        tenant_id = "%d_%d" % (tenant, self.opts("trafgentenants"))
        for hsrc in hlist.lst:
            for hdst in hlist.lst:
                if hsrc == hdst:
                    continue
                dst_ip = socket.gethostbyname(hdst.hostname())
                self.start_trafgen_client(hsrc, dst_ip, tenant_id=tenant_id,
                                          proto=self.opts("trafgenproto"),
                                          port=start_port + 1000 + tenant,
                                          mtu=self.opts("mtu"),
                                          cpus=[avail_cpus[assigned_cpus % len(avail_cpus)]],
                                          dir=e('logs'))
        assigned_cpus += 1

    # Start mcperf clients to generate requests. For each (tenant, server)
    # pair, create a separate mcperf instance. This is required since mcperf
    # does not have an option to send requests randomly to the available
    # memcached servers.
    tmp_assigned_cpus = 0
    for tenant in xrange(0, self.opts("mctenants")):
        for (srv_id, hserver) in enumerate(hservers.lst):
            server_ip = socket.gethostbyname(hserver.hostname())
            for (cli_id, hclient) in enumerate(hclients.lst):
                # Index of tenant and client connecting to this particular
                # server for this tenant.
                tenant_id = "%d_%d" % (tenant, self.opts("mctenants"))
                client_id = "%d_%d" % (cli_id, len(hclients.lst))
                # We use the traffic class number as the seed for the
                # random number generator so that all instances generate
                # unique random number sequences. But we want to use a
                # different seed than that used for the initial "set"
                # workload for populating caches. So we increment the
                # respective class numbers by, say, 11213 to get the seed.
                seed = (start_port + 11213 +
                        (tenant * len(hservers.lst) * len(hclients.lst)) +
                        (srv_id * len(hclients.lst)) +
                        (cli_id))
                self.start_mcperf(hclient, server_ip, tenant_id, client_id,
                                  seed=seed, port=start_port + tenant,
                                  time=self.opts("t"),
                                  nconn=self.opts("mcnconn"),
                                  mcrate=self.opts("mcrate"),
                                  mcexp=self.opts("mcexp"),
                                  workload=self.opts("mcworkload"),
                                  mcsize=self.opts("mcsize"),
                                  cpus=[avail_cpus[tmp_assigned_cpus % len(avail_cpus)]],
                                  dir=e('logs'))
                tmp_assigned_cpus += 1

    self.hservers = hservers
    self.hclients = hclients
    self.hlist = hlist
    if sniffer:
        self.hsniffer = hsniffer
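
# A worked example of the class-ID, seed, and rate arithmetic that start()
# uses above, under assumed parameters. Illustrative sketch only:
# _demo_rates_and_classes is hypothetical and is not called anywhere in the
# experiment code.
def _demo_rates_and_classes(start_port=5000, mctenants=2, num_servers=2,
                            num_clients=2, mc_total_rate_client=8000.0):
    # Every (tenant, server, client) tuple maps to a unique qdisc class ID,
    # laid out row-major: tenant stride = num_servers * num_clients,
    # server stride = num_clients.
    for tenant in xrange(mctenants):
        for srv_id in xrange(num_servers):
            for cli_id in xrange(num_clients):
                klass = (start_port +
                         (tenant * num_servers * num_clients) +
                         (srv_id * num_clients) +
                         (cli_id))
                # The mcperf seed for the "get" run is the class ID offset
                # by 11213, so it never collides with the seeds of the
                # prepopulating "set" run (which uses the class ID itself).
                seed = klass + 11213
                print "tenant=%d srv=%d cli=%d klass=%d seed=%d" % (
                    tenant, srv_id, cli_id, klass, seed)
    # Per-pair client-side rate limit: the total client rate split evenly
    # across (mctenants * num_servers) pairs, e.g. 8000/(2*2) = 2000 Mbit.
    print "mc_pair_rate_client = %.3f Mbit" % (
        mc_total_rate_client / (mctenants * num_servers))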