def __init__(self, args):
    self.env = os.environ.copy()
    base = self.env['HOME'] + '/interference-bench/'
    cpu_per_node = 16
    nodes = (2, 4, 8, 16)
    schedulers = ("cfs", "pinned_cyclic", "fifo_cyclic")
    affinity = ("4-11,16-23", )

    self.modules_load = 'source {}/miniapps/mini.env'.format(base)

    # Rank-count policies: most benchmarks take every available slot, IS/CG
    # need a power of two, BT/SP need a perfect square.
    def np_func(nodes, oversub):
        return nodes * oversub * cpu_per_node

    def np_power2(nodes, oversub):
        return nodes * oversub * cpu_per_node

    def np_square(nodes, oversub):
        np = nodes * oversub * cpu_per_node
        return math.floor(math.sqrt(np))**2

    def compile_command(wd, prog, nodes, oversub, size):
        # HACK: pick the rank-count policy that matches the benchmark.
        if prog in ("bt", "sp"):
            np = np_square(nodes, oversub)
        elif prog in ("is", "cg"):
            np = np_power2(nodes, oversub)
        else:
            np = np_func(nodes, oversub)
        # HACK END
        return self.modules_load + '; cd {} ;' \
            ' make {} NPROCS={} CLASS={}'.format(wd, prog, np, size)

    common_params = {
        'compile_command': compile_command,
        'schedulers': schedulers,
        'oversub': (1, 2, 4),
        'nodes': nodes,
        'affinity': affinity,
        'size': ('C', 'D'),
    }

    mz_params = {'wd': base + "/NPB3.3.1-MZ/NPB3.3-MZ-MPI/"}
    npb_params = {'wd': base + "/NPB3.3.1/NPB3.3-MPI/"}

    self.group = \
        manager.BenchGroup(Npb, **common_params, **mz_params,
                           np=np_func, prog=("bt-mz", "sp-mz"))
    self.group += \
        manager.BenchGroup(Npb, **common_params, **npb_params,
                           np=np_func, prog=("ep", "lu", "mg"))
    self.group += \
        manager.BenchGroup(Npb, **common_params, **npb_params,
                           np=np_power2, prog=("is", "cg"))
    self.group += \
        manager.BenchGroup(Npb, **common_params, **npb_params,
                           np=np_power2, prog=("ft",))
    self.group += \
        manager.BenchGroup(Npb, **common_params, **npb_params,
                           np=np_square, prog=("bt", "sp"))

    self.mpilib = Mvapich(mpiexec='srun', compile_pre=self.modules_load)

    self.env['INTERFERENCE_PERF'] = ','.join([
        'instructions',
        'cache_references',
        'cache_misses',
        'migrations',
        'page_faults',
        'context_switches',
    ])

    self.runs = (i for i in range(3))
    self.benchmarks = self.group.benchmarks
    self.nodelist = self.get_nodelist()
    self.hostfile_dir = self.env['HOME'] + '/hostfiles'

    super().__init__(args)
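
# Illustrative sketch (not part of the configuration above): the rank-count
# policies restated standalone to show what they produce on this machine
# (cpu_per_node = 16). BT/SP require a perfect-square rank count, so
# np_square rounds the available slots down to the nearest square; IS/CG
# need a power of two, which nodes * oversub * 16 already is whenever nodes
# and oversub are powers of two. The _demo names are local to this sketch.
import math

def _np_flat_demo(nodes, oversub, cpn=16):
    # every available slot
    return nodes * oversub * cpn

def _np_square_demo(nodes, oversub, cpn=16):
    # largest perfect square not exceeding the available slots
    return math.floor(math.sqrt(nodes * oversub * cpn)) ** 2

def _policy_table_demo():
    # at oversub=2: [(64, 64), (128, 121), (256, 256), (512, 484)]
    return [(_np_flat_demo(n, 2), _np_square_demo(n, 2))
            for n in (2, 4, 8, 16)]
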
def __init__(self, args): self.env = os.environ.copy() base = self.env['HOME'] + "/interference-bench/" self.hostfile_dir = self.env['HOME'] + '/hostfiles' cpu_per_node = 4 def np_square(nodes, oversub): np = nodes * oversub * cpu_per_node return math.floor(math.sqrt(np))**2 def np_power2(nodes, oversub): return nodes * oversub * cpu_per_node def np_func(nodes, oversub): return nodes * oversub * cpu_per_node def compile_command(wd, prog, nodes, oversub, size): # A HACK if prog in ("bt", "sp"): np = np_square(nodes, oversub) elif prog in ( "is", "cg", ): np = np_power2(nodes, oversub) else: np = np_func(nodes, oversub) # HACK END return 'cd {} ;' \ ' make {} NPROCS={} CLASS={}'.format(wd, prog, np, size) args.run_order = 'consecutive' common_params = { 'nodes': (1, ), 'schedulers': ("cfs", "pinned_blocked", "pinned_cyclic"), 'affinity': ("2-3", "1,3"), 'oversub': (1, 2, 4), 'compile_command': compile_command, 'size': ( 'W', 'S', ), } mz_params = {'wd': base + "/NPB3.3.1-MZ/NPB3.3-MZ-MPI/"} npb_params = {'wd': base + "/NPB3.3.1/NPB3.3-MPI/"} self.group = \ manager.BenchGroup(Npb, **common_params, **mz_params, np=np_func, prog=("bt-mz", "sp-mz")) self.group += \ manager.BenchGroup(Npb, **common_params, **npb_params, np=np_func, prog=("ep", "lu", "mg")) self.group += \ manager.BenchGroup(Npb, **common_params, **npb_params, np=np_power2, prog=("is", "cg",)) self.group += \ manager.BenchGroup(Npb, **common_params, **npb_params, np=np_power2, prog=("ft",)) self.group += \ manager.BenchGroup(Npb, **common_params, **npb_params, np=np_square, prog=("bt", "sp")) self.mpilib = OpenMPI() self.runs = (i for i in range(3)) self.benchmarks = self.group.benchmarks self.nodelist = self.get_nodelist() self.hostfile_dir = self.env['HOME'] + '/hostfiles' super().__init__(args)
def __init__(self, args): self.env = os.environ.copy() base = self.env['HOME'] + "/interference-bench/miniapps/" nodes = (1, 2, 4, 8, 16) cpu_per_node = 24 oversub_param = { 'oversub': (1, ), 'schedulers': ("cfs", "pinned_cyclic") } fullsub_param = {'oversub': (2, 4), 'schedulers': ("cfs")} def np_func(nodes, oversub): return nodes * cpu_per_node def comd_size_param(size, nodes, oversub): np = np_func(nodes, oversub) # Ensure that f has at least 3 groups domains = Miniapp.partition(np, 3) problem_size = '-x 200 -y 200 -z 200' decomposition = '-i {} -j {} -k {} '.format(*domains) return decomposition + problem_size self.modules_load = 'source {}/mini.env'.format(base) compile_command = self.modules_load + '; cd ../src-mpi ; make' tmpl = './{prog} {size_param}' comd_param = { 'prog': ("CoMD-mpi", ), 'size': (1, ), 'np': np_func, 'nodes': nodes, 'affinity': ("0-23", ), 'size_param': comd_size_param, 'wd': base + "CoMD/bin", 'compile_command': compile_command, 'tmpl': tmpl } self.group = \ manager.BenchGroup(Miniapp, **comd_param, **fullsub_param) + \ manager.BenchGroup(Miniapp, **comd_param, **oversub_param) compile_command = self.modules_load + '; make lassen_mpi' def lassen_size_param(size, nodes, max_nodes, oversub): np = np_func(nodes, oversub) # Ensure that f has at least 3 groups domains = Miniapp.partition(np, 3) decomposition = '{} {} {}'.format(*domains) global_zones = ' {}'.format(cpu_per_node * max_nodes * size) * 3 return "default {} {}".format(decomposition, global_zones) lassen_param = { 'prog': ("lassen_mpi", ), 'size_param': lassen_size_param, 'size': (1, ), 'affinity': ("0-23", ), 'nodes': nodes, 'np': np_func, 'max_nodes': max(nodes), 'compile_command': compile_command, 'wd': base + "lassen/", 'tmpl': tmpl } self.group += \ manager.BenchGroup(Miniapp, **lassen_param, **oversub_param) + \ manager.BenchGroup(Miniapp, **lassen_param, **fullsub_param) def lulesh_np_func(nodes): return {1: 8, 2: 27, 4: 64, 8: 125, 16: 343}[nodes] compile_command = self.modules_load + '; make' lulesh_param = { 'prog': ("lulesh2.0", ), 'size_param': ("-i 300 -c 10 -b 3", ), 'size': (1, ), 'affinity': ("0-23", ), 'nodes': nodes, 'np': lulesh_np_func, 'wd': base + "lulesh2.0.3/", 'compile_command': compile_command, 'tmpl': tmpl } self.group += \ manager.BenchGroup(Miniapp, **lulesh_param, **oversub_param) + \ manager.BenchGroup(Miniapp, **lulesh_param, **fullsub_param) self.mpiexec = 'mpirun_rsh' self.mpiexec_np = '-np' self.mpiexec_hostfile = '-hostfile {}' self.preload = 'LD_PRELOAD={}' self.lib = manager.Lib('mvapich', compile_pre=self.modules_load, compile_flags='') self.env['OMP_NUM_THREADS'] = '1' self.env['INTERFERENCE_LOCALID'] = 'MV2_COMM_WORLD_LOCAL_RANK' self.env['INTERFERENCE_HACK'] = 'true' self.prefix = 'INTERFERENCE' self.runs = (i for i in range(3)) self.benchmarks = self.group.benchmarks self.nodelist = self.get_nodelist() self.hostfile_dir = self.env['HOME'] + '/hostfiles' super().__init__(args)
def __init__(self, args):
    self.env = os.environ.copy()
    base = self.env['HOME'] + '/interference-bench/'
    cpu_per_node = 8
    nodes = (1, 2, 4)
    schedulers = ("cfs", "pinned_cyclic", "pinned_blocked", "fifo_cyclic")
    affinity = ("0-7", "4-7")

    def m(x, y):
        """Merge two dicts into a new one without mutating either."""
        z = x.copy()
        z.update(y)
        return z

    self.modules_load = '. {}/mini.env'.format(base)

    # Rank-count policies (same scheme as the other NPB configurations).
    def np_func(nodes, oversub):
        return nodes * oversub * cpu_per_node

    def np_power2(nodes, oversub):
        return nodes * oversub * cpu_per_node

    def np_square(nodes, oversub):
        np = nodes * oversub * cpu_per_node
        return math.floor(math.sqrt(np))**2

    def compile_command(wd, prog, nodes, oversub, size):
        # HACK: pick the rank-count policy that matches the benchmark.
        if prog in ("bt", "sp"):
            np = np_square(nodes, oversub)
        elif prog in ("is", "cg"):
            np = np_power2(nodes, oversub)
        else:
            np = np_func(nodes, oversub)
        # HACK END
        return self.modules_load + '; cd {} ;' \
            ' make {} NPROCS={} CLASS={}'.format(wd, prog, np, size)

    common_params = {
        'compile_command': compile_command,
        'schedulers': schedulers,
        'oversub': (1, 2, 4),
        'nodes': nodes,
        'affinity': affinity,
        'size': ('B', 'C'),
    }

    mz_params = {'wd': base + "/NPB3.3.1-MZ/NPB3.3-MZ-MPI/"}
    npb_params = {'wd': base + "/NPB3.3.1/NPB3.3-MPI/"}

    self.group = \
        manager.BenchGroup(Npb, **m(m(common_params, mz_params),
                                    {'np': np_func,
                                     'prog': ("bt-mz", "sp-mz")}))
    self.group += \
        manager.BenchGroup(Npb, **m(m(common_params, npb_params),
                                    {'np': np_func,
                                     'prog': ("ep", "lu", "mg")}))
    self.group += \
        manager.BenchGroup(Npb, **m(m(common_params, npb_params),
                                    {'np': np_power2,
                                     'prog': ("is", "cg")}))
    self.group += \
        manager.BenchGroup(Npb, **m(m(common_params, npb_params),
                                    {'np': np_power2,
                                     'prog': ("ft",)}))
    self.group += \
        manager.BenchGroup(Npb, **m(m(common_params, npb_params),
                                    {'np': np_square,
                                     'prog': ("bt", "sp")}))

    self.mpilib = Mvapich(mpiexec='mpirun', compile_pre=self.modules_load)

    self.env['INTERFERENCE_PERF'] = ','.join([
        'instructions',
        'cache_references',
        'page_faults',
        'migrations',
        'context_switches',
        'cache_misses',
    ])

    self.runs = (i for i in range(3))
    self.benchmarks = self.group.benchmarks
    self.nodelist = self.get_nodelist()

    super().__init__(args)
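
# Side note on the m() helper above: it is the pre-3.5 idiom for merging
# dicts without mutating the inputs. On Python 3.5+ the nested m(m(a, b), c)
# calls can be collapsed into a single dict literal with the same result;
# the names below are local to this demo.
def _merge_demo():
    common = {'oversub': (1, 2, 4), 'size': ('B', 'C')}
    extra = {'wd': '/some/path/'}
    override = {'size': ('C', )}
    # {**a, **b, **c} builds a fresh dict; later keys win, as with m()
    return {**common, **extra, **override}
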
def __init__(self, args):
    self.env = os.environ.copy()

    def np_func(nodes, cpu_per_node):
        return nodes * cpu_per_node

    def vp_func(nodes, oversub, cpu_per_node):
        # Oversubscription is expressed through virtual processors.
        return np_func(nodes, cpu_per_node) * oversub

    def comd_size_param(nodes, oversub, cpu_per_node):
        np = vp_func(nodes, oversub, cpu_per_node)
        # Partition np into three factors for CoMD's i*j*k decomposition.
        domains = Miniapp.partition(np, 3)
        problem_size = '-x 200 -y 200 -z 200'
        decomposition = '-i {} -j {} -k {} '.format(*domains)
        return decomposition + problem_size

    schedulers = ("none", )
    affinity = ("0-23", )
    common_params = {
        'cpu_per_node': (2, 6, 12, 24),
        'oversub': (1, 2, 4, 12),
        'nodes': (1, 2, 4, 8, 16),
        'affinity': affinity
    }

    base = self.env['HOME'] + "/interference-bench/"
    tmpl = './charmrun +p{np} ++mpiexec ++remote-shell {script} ' \
        './{prog} +vp{vp} {size_param} ++verbose'

    def compile_command(wd):
        return 'cd {}/../ ; make'.format(wd)

    self.group = \
        manager.BenchGroup(Miniapp, prog=("CoMD-ampi",),
                           **common_params,
                           size=(1,),
                           vp=vp_func,
                           np=np_func,
                           schedulers=schedulers,
                           compile_command=compile_command,
                           size_param=comd_size_param,
                           wd=base + "CoMD-1.1/bin/",
                           tmpl=tmpl)

    def lassen_size_param(size, nodes, max_nodes, oversub, cpu_per_node):
        # cpu_per_node must come in as a parameter: it is swept through
        # common_params and is not defined in the enclosing scope.
        np = vp_func(nodes, oversub, cpu_per_node)
        # Partition np into three factors for the domain decomposition.
        domains = Miniapp.partition(np, 3)
        decomposition = '{} {} {}'.format(*domains)
        global_zones = ' {}'.format(cpu_per_node * max_nodes * size) * 3
        return "default {} {}".format(decomposition, global_zones)

    self.group += \
        manager.BenchGroup(Miniapp, prog=("lassen_mpi",),
                           **common_params,
                           size_param=lassen_size_param,
                           vp=vp_func,
                           size=(1,),
                           np=np_func,
                           schedulers=schedulers,
                           max_nodes=max(common_params['nodes']),
                           wd=base + "Lassen-1.0/",
                           tmpl=tmpl)

    def lulesh_np_func(nodes, cpu_per_node):
        # LULESH needs a cubic rank count: keep n when it is a perfect cube
        # (the (c + 1)**3 test guards against float truncation), otherwise
        # fall back to the largest cube below it.
        n = nodes * cpu_per_node
        c = int(n**(1 / 3.))
        if c**3 == n or (c + 1)**3 == n:
            return n
        return math.floor(n**(1 / 3.))**3

    def lulesh_vp_func(nodes, oversub, cpu_per_node):
        return lulesh_np_func(nodes, cpu_per_node * oversub)

    self.group += \
        manager.BenchGroup(Miniapp, prog=("lulesh2.0",),
                           **common_params,
                           size=(1,),
                           size_param=("-i 300 -c 10 -b 3",),
                           vp=lulesh_vp_func,
                           np=lulesh_np_func,
                           schedulers=schedulers,
                           wd=base + "Lulesh-2.0/",
                           tmpl=tmpl)

    charm_path = self.env['HOME'] + \
        '/ampi/charm/verbs-linux-x86_64-gfortran-gcc/'
    self.env['PATH'] = self.env['PATH'] + ":" + charm_path + "bin"

    self.lib = manager.Lib(
        'charm',
        '-Dtest=ON -Dfortran=ON'
        ' -DMPI_CC_COMPILER=ampicc'
        ' -Dwrapper=OFF'
        ' -DMPI_CXX_COMPILER=ampicxx'
        ' -DMPI_CXX_INCLUDE_PATH={path}/include/'
        ' -DMPI_CXX_LIBRARIES={path}/lib/'
        ' -DMPI_C_LIBRARIES={path}/lib/'
        ' -DMPI_C_INCLUDE_PATH={path}/include/'.format(path=charm_path))

    self.prefix = 'INTERFERENCE'
    self.runs = (i for i in range(3))
    self.benchmarks = self.group.benchmarks
    self.nodelist = self.get_nodelist()
    self.hostfile_dir = self.env['HOME'] + '/hostfiles'

    super().__init__(args)

    old_ld = self.env['LD_LIBRARY_PATH'] + \
        ':' if 'LD_LIBRARY_PATH' in self.env else ''
    self.env['LD_LIBRARY_PATH'] = old_ld + self.get_lib_path()
    print(self.env['LD_LIBRARY_PATH'])
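
# Standalone sanity check (illustrative): lulesh_np_func clamps the rank
# count to a perfect cube because LULESH requires a cubic processor grid.
# At 24 cores per node this reproduces exactly the fixed table used in the
# mvapich miniapp configuration above ({1: 8, 2: 27, 4: 64, 8: 125, 16: 343}).
def _largest_cube_le(n):
    c = round(n ** (1 / 3.0))
    while c ** 3 > n:
        c -= 1
    return c ** 3

def _lulesh_table_check():
    table = {n: _largest_cube_le(n * 24) for n in (1, 2, 4, 8, 16)}
    assert table == {1: 8, 2: 27, 4: 64, 8: 125, 16: 343}
    return table
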
def __init__(self, args): base = "/home/desertfox/research/projects/ffmk/interference-bench/" common_params = { 'nodes': (1, ), 'schedulers': ("cfs", ), 'affinity': ("2-3", "1,3") } tmpl = './bin/charmrun +p{np} ./bin/{prog}' \ ' ++nodelist {hostfile} +vp{vp} {size} ++local' self.group = \ manager.BenchGroup(Miniapp, prog=("CoMD-ampi",), size=("-i 2 -j 1 -k 1",), vp=(2,), np=(1, 2), **common_params, wd=base + "CoMD-1.1/", tmpl=tmpl) + \ manager.BenchGroup(Miniapp, prog=("CoMD-ampi",), size=("-i 2 -j 2 -k 1",), vp=(4,), np=(1, 2, 4), **common_params, wd=base + "CoMD-1.1/", tmpl=tmpl) + \ manager.BenchGroup(Miniapp, prog=("CoMD-ampi",), size=("-i 2 -j 2 -k 2",), vp=(8,), np=(2, 4), **common_params, wd=base + "CoMD-1.1/", tmpl=tmpl) self.preload = 'LD_PRELOAD={}' self.env = os.environ.copy() charm_path = self.env[ 'HOME'] + '/research/projects/ffmk/charm/netlrts-linux-x86_64-gfortran-gcc/' self.env['PATH'] = self.env['PATH'] + ":" + charm_path + "bin" old_ld = self.env['LD_LIBRARY_PATH'] + \ ':' if 'LD_LIBRARY_PATH' in self.env else '' self.lib = manager.Lib( 'charm', '-Dtest=ON -Dfortran=ON -DMPI_CC_COMPILER=ampicc' ' -Dwrapper=OFF' ' -DMPI_CXX_COMPILER=ampicxx -DMPI_CXX_INCLUDE_PATH={path}/include/' ' -DMPI_C_INCLUDE_PATH={path}/../include/'.format(path=charm_path)) self.prefix = 'INTERFERENCE' self.runs = (i for i in range(3)) self.benchmarks = self.group.benchmarks self.nodelist = self.get_nodelist() self.hostfile_dir = self.env['HOME'] + '/hostfiles' super().__init__(args) self.env['LD_LIBRARY_PATH'] = old_ld + self.get_lib_path() print(self.env['LD_LIBRARY_PATH'])