Example no. 1
def main():
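  # Driver for a PLUTO thread-scaling experiment: previously completed runs are
  # read from results/summary.csv (presumably so their tile sizes and time-step
  # counts can be reused), then one test per LIKWID performance group is launched.
  # thread_scaling_test is assumed to be defined elsewhere in this script.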
  from scripts.utils import create_project_tarball, get_stencil_num
  from scripts.conf.conf import machine_conf, machine_info
  import os, sys
  from csv import DictReader
  import time,datetime

  dry_run = 1 if len(sys.argv)<2 else int(sys.argv[1])

  time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H_%M')
  exp_name = "pluto_thread_scaling_at_%s_%s" % (machine_info['hostname'], time_stamp)

  tarball_dir='results/'+exp_name
  if(dry_run==0): create_project_tarball(tarball_dir, "test_"+exp_name)
  target_dir='results/' + exp_name

  # parse the results to find out which of them already exist
  data = []
  data_file = os.path.join('results', 'summary.csv')
  try:
    with open(data_file, 'rb') as output_file:
      raw_data = DictReader(output_file)
      for k in raw_data:
        kernel = get_stencil_num(k)
        if(kernel==0):
          k['stencil'] ='3d25pt'
        elif(kernel==1):
          k['stencil'] ='3d7pt'
        elif(kernel==4):
          k['stencil'] ='3d25pt_var'
        elif(kernel==5):
          k['stencil'] ='3d7pt_var'
        else:
          raise ValueError("unknown stencil kernel: %d" % kernel)
        data.append(k)
  except:
    pass
  param_l = dict()
  for k in data:
    try:
      param_l[(k['stencil'], int(k['Global NX']), k['LIKWID performance counter']  ) ] = ([int(k['PLUTO tile size of loop 1']), int(k['PLUTO tile size of loop 3']), int(k['PLUTO tile size of loop 4'])], int(k['Number of time steps']) )
    except:
      print k
      raise


  count = 0
  for group in ['MEM', 'L2']:
#  for group in ['MEM', 'L2', 'L3', 'DATA', 'TLB_DATA', 'ENERGY']:
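    # pinning_args are options for likwid-perfctr: "-g" selects the performance
    # group, "-C" pins threads to a core range, and "-m" enables the marker API.
    # The open-ended range "0-" is presumably completed with the thread count
    # inside thread_scaling_test.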
    if(machine_info['hostname']=='Haswell_18core'):
      machine_conf['pinning_args'] = " -m -g " + group + " -C S1:0-"
    elif(machine_info['hostname']=='IVB_10core'):
      if group=='TLB_DATA': group='TLB'
      machine_conf['pinning_args'] = " -g " + group + " -C S0:0-"
#    for k,v in param_l.iteritems(): print k,v
    count = count + thread_scaling_test(dry_run, target_dir, exp_name, param_l=param_l, group=group)

  print "experiments count =" + str(count)
Example no. 2
def main():
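    # Driver for a Pochoir increasing-grid-size experiment: (stencil, NX) pairs
    # already present in results/summary.csv are collected so finished runs can
    # be skipped, and the pinning arguments combine a profiler core list with
    # numactl --physcpubind to use all cores. igs_test is assumed to be defined
    # elsewhere in this script.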
    from scripts.utils import create_project_tarball, get_stencil_num
    from scripts.conf.conf import machine_conf, machine_info
    import os, sys
    from csv import DictReader
    import time, datetime

    dry_run = 1 if len(sys.argv) < 2 else int(sys.argv[1])

    time_stamp = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y%m%d_%H_%M')
    exp_name = "pochoir_increasing_grid_size_at_%s_%s" % (
        machine_info['hostname'], time_stamp)

    tarball_dir = 'results/' + exp_name
    if (dry_run == 0):
        create_project_tarball(tarball_dir, "project_" + exp_name)
    target_dir = 'results/' + exp_name

    # parse the results to find out which of them already exist
    data = []
    data_file = os.path.join('results', 'summary.csv')
    try:
        with open(data_file, 'rb') as output_file:
            raw_data = DictReader(output_file)
            for k in raw_data:
                k['stencil'] = get_stencil_num(k)
                data.append(k)
    except:
        pass
    params = set()
    for k in data:
        try:
            params.add((k['stencil'], int(k['Global NX'])))
        except:
            print k
            raise

    #update the pinning information to use all cores
    th = machine_info['n_cores']

    count = 0
    for group in ['MEM', 'TLB_DATA', 'L2', 'L3', 'DATA']:  #, 'ENERGY']:
        if (machine_info['hostname'] == 'IVB_10core'):
            if group == 'TLB_DATA': group = 'TLB'
        machine_conf[
            'pinning_args'] = " -m -g " + group + " -c " + "%d-%d " % (
                0, th - 1) + '-- numactl --physcpubind=%d-%d' % (0, th - 1)
        #    for k in params: print k
        count = count + igs_test(
            dry_run, target_dir, exp_name, th=th, params=params, group=group)

    print "experiments count =" + str(count)
Example no. 3
def main():
  from scripts.utils import create_project_tarball, get_stencil_num
  from scripts.conf.conf import machine_conf, machine_info
  import os, sys
  from csv import DictReader
  import time,datetime

  dry_run = 1 if len(sys.argv)<2 else int(sys.argv[1])

  time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H_%M')
  exp_name = "pochoir_increasing_grid_size_at_%s_%s" % (machine_info['hostname'], time_stamp)  

  tarball_dir='results/'+exp_name
  if(dry_run==0): create_project_tarball(tarball_dir, "project_"+exp_name)
  target_dir='results/' + exp_name 

  # parse the results to find out which of them already exist
  data = []
  data_file = os.path.join('results', 'summary.csv')
  try:
    with open(data_file, 'rb') as output_file:
      raw_data = DictReader(output_file)
      for k in raw_data:
        k['stencil'] = get_stencil_num(k)
        data.append(k)
  except:
    pass
  params = set()
  for k in data:
    try:
      params.add( (k['stencil'], int(k['Global NX'])) )
    except:
      print k
      raise

  #update the pinning information to use all cores
  th = machine_info['n_cores']

  count = 0
  for group in ['MEM', 'TLB_DATA', 'L2', 'L3', 'DATA']:#, 'ENERGY']:
    if(machine_info['hostname']=='IVB_10core'):
      if group=='TLB_DATA': group='TLB' 
    machine_conf['pinning_args'] = " -m -g " + group + " -c " + "%d-%d "%(0, th-1) + '-- numactl --physcpubind=%d-%d'%(0,th-1)
#    for k in params: print k
    count = count + igs_test(dry_run, target_dir, exp_name, th=th, params=params, group=group) 

  print "experiments count =" + str(count)
Example no. 4
def main():
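    # Driver for a Pochoir thread-scaling experiment; as in the other drivers,
    # existing (stencil, NX) results are read from results/summary.csv, and
    # thread_scaling_test is assumed to be defined elsewhere in this script.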
    from scripts.utils import create_project_tarball, get_stencil_num
    from scripts.conf.conf import machine_conf, machine_info
    import os, sys
    from csv import DictReader
    import time, datetime

    dry_run = 1 if len(sys.argv) < 2 else int(sys.argv[1])

    time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime("%Y%m%d_%H_%M")
    exp_name = "pochoir_thread_scaling_at_%s_%s" % (machine_info["hostname"], time_stamp)

    tarball_dir = "results/" + exp_name
    if dry_run == 0:
        create_project_tarball(tarball_dir, "project_" + exp_name)
    target_dir = "results/" + exp_name

    # parse the results to find out which of them already exist
    data = []
    data_file = os.path.join("results", "summary.csv")
    try:
        with open(data_file, "rb") as output_file:
            raw_data = DictReader(output_file)
            for k in raw_data:
                k["stencil"] = get_stencil_num(k)
                data.append(k)
    except:
        pass
    params = set()
    for k in data:
        try:
            params.add((k["stencil"], int(k["Global NX"])))
        except:
            print k
            raise

    count = 0
    for group in ["MEM", "L2"]:  # 'TLB_DATA', 'L3', 'DATA', 'ENERGY']:
        #    for k in params: print k
        count = count + thread_scaling_test(dry_run, target_dir, exp_name, params=params, group=group)

    print "experiments count =" + str(count)
Example no. 5
def main():
  from scripts.utils import create_project_tarball, get_stencil_num
  from scripts.conf.conf import machine_conf, machine_info
  import os, sys
  from csv import DictReader
  import time,datetime

  dry_run = 1 if len(sys.argv)<2 else int(sys.argv[1])

  time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y%m%d_%H_%M')
  exp_name = "pochoir_thread_scaling_at_%s_%s" % (machine_info['hostname'], time_stamp)  

  tarball_dir='results/'+exp_name
  if(dry_run==0): create_project_tarball(tarball_dir, "project_"+exp_name)
  target_dir='results/' + exp_name 

  # parse the results to find out which of them already exist
  data = []
  data_file = os.path.join('results', 'summary.csv')
  try:
    with open(data_file, 'rb') as output_file:
      raw_data = DictReader(output_file)
      for k in raw_data:
        k['stencil'] = get_stencil_num(k)
        data.append(k)
  except:
    pass
  params = set()
  for k in data:
    try:
      params.add( (k['stencil'], int(k['Global NX'])) )
    except:
      print k
      raise

  count = 0
  for group in ['MEM', 'L2']:# 'TLB_DATA', 'L3', 'DATA', 'ENERGY']:
#    for k in params: print k
    count = count + thread_scaling_test(dry_run, target_dir, exp_name, params=params, group=group) 

  print "experiments count =" + str(count)
Example no. 6
def parse_entry_info(k, is_tgs_only):
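    # Normalizes one CSV result row (k) in place. req_fields, hw_ctr_fields and
    # hw_ctr_labels are assumed to be module-level globals defined alongside
    # this function.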
    from scripts.utils import get_stencil_num, load_csv

    #    # get processor name from the file names
    #    if(k['OpenMP Threads']!=''):
    #      if(int(k['OpenMP Threads']) == 10):
    #        machine_name = 'ivb10'
    #      elif(int(k['OpenMP Threads']) == 18):
    #        machine_name = 'hw18'

    # Use single field to represent the performance
    if 'Total RANK0 MStencil/s MAX' in k.keys():
        if (k['Total RANK0 MStencil/s MAX'] != ''):
            k['MStencil/s  MAX'] = k['MWD main-loop RANK0 MStencil/s MAX']
    # temporary for deprecated format
    if 'RANK0 MStencil/s  MAX' in k.keys():
        if k['RANK0 MStencil/s  MAX'] != '':
            k['MStencil/s  MAX'] = k['RANK0 MStencil/s  MAX']

    # add stencil operator
    k['stencil'] = get_stencil_num(k)
    if k['stencil'] == 0:
        k['stencil_name'] = '25_pt_const'
    elif k['stencil'] == 1:
        k['stencil_name'] = '7_pt_const'
    elif k['stencil'] == 4:
        k['stencil_name'] = '25_pt_var'
    elif k['stencil'] == 5:
        k['stencil_name'] = '7_pt_var'
    elif k['stencil'] == 6:
        k['stencil_name'] = 'solar'

    # add the approach
    if (k['Time stepper orig name'] == 'Spatial Blocking'):
        k['method'] = 'Spt.blk.'
    elif (k['Time stepper orig name'] in ['PLUTO', 'Pochoir']):
        k['method'] = k['Time stepper orig name']
    elif (k['Time stepper orig name'] == 'Diamond'):
        if ('_tgs1_' in k['file_name']):
            k['method'] = '1WD'
        else:
            k['method'] = 'MWD'
    else:
        print("ERROR: Unknow time stepper")
        raise

    # TEMPORARY: Older versions of Pochoir did not have OpenMP threads count
    if (k['method'] == 'Pochoir' and k['OpenMP Threads'] == ''):
        k['OpenMP Threads'] = 0

    # add mwd type
    k['mwdt'] = 'none'
    if (k['method'] == 'MWD'):
        mwd = k['Wavefront parallel strategy'].lower()
        if ('fixed' in mwd) and ('relaxed' in mwd):
            k['mwdt'] = 'fers'
        elif ('fixed' in mwd):
            k['mwdt'] = 'fe'
        elif ('relaxed' in mwd):
            k['mwdt'] = 'rs'
        elif ('wavefront' in mwd):
            k['mwdt'] = 'block'

    # add precision information
    p = 1 if k['Precision'] in 'DP' else 0
    k['Precision'] = p

    # TLB measurement for LIKWID 4
    if 'L1 DTLB load miss rate avg' in k.keys():
        if k['L1 DTLB load miss rate avg'] != '':
            hw_ctr_fields['TLB'] = [('L1 DTLB load miss rate avg', float)]
            hw_ctr_labels['TLB'] = [('L1 DTLB load miss rate avg', 'tlb_',
                                     'tlb')]

    # parse the general fields' format
    for f in req_fields + hw_ctr_fields[k['LIKWID performance counter']]:
        try:
            k[f[0]] = map(f[1], [k[f[0]]])[0]
        except:
            print("ERROR: results entry missing essential data at file:%s" %
                  (k['file_name']))
            print f[0]
            print k
            return

    # Parse the L2 data per thread
    if (k['LIKWID performance counter'] == 'L2'):
        k['L2 vol list'] = []
        for i in range(k['OpenMP Threads']):
            field = 'L2 data volume core %d' % i
            if field in k.keys():
                k['L2 vol list'].append(float(k['L2 data volume core %d' % i]))

    k['tgsl'] = k['Thread group size']
    if (is_tgs_only == 0):  # regular mode for all MWD
        if (k['method'] == 'MWD'):
            k['tgsl'] = 100
Example no. 7
def main():
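    # Reads a results CSV (path given as the first command-line argument),
    # normalizes each entry, and collects measurement and performance series
    # for plotting. plot_all is assumed to be defined or imported elsewhere in
    # this module, and hw_ctr_labels (used by the LIKWID 4 TLB fallback) is
    # assumed to be a module-level dict.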
    import sys
    from scripts.utils import get_stencil_num, load_csv
    from collections import OrderedDict

    raw_data = load_csv(sys.argv[1])

    req_fields = [('MStencil/s  MAX', float), ('Precision', int),
                  ('Global NX', int), ('Number of time steps', int),
                  ('Number of tests', int)]

    hw_ctr_fields = {
        '': [],
        'TLB': [('L1 DTLB miss rate sum', float)],
        'DATA': [('Load to Store ratio avg', float)],
        'L2': [('L2 data volume sum', float)],
        'L3': [('L3 data volume sum', float)],
        'MEM': [('Total Memory Transfer', float),
                ('Sustained Memory BW', float)],
        'ENERGY': [('Energy', float), ('Energy DRAM', float), ('Power', float),
                   ('Power DRAM', float)]
    }

    duplicates = set()
    meas_figs = dict()
    perf_fig = dict()
    for k in raw_data:

        # get processor name from the file names
        if (k['OpenMP Threads'] != ''):
            if (int(k['OpenMP Threads']) == 10):
                machine_name = 'ivb10'
            elif (int(k['OpenMP Threads']) == 18):
                machine_name = 'hw18'

        # Use single field to represent the performance
        if 'Total RANK0 MStencil/s MAX' in k.keys():
            if (k['Total RANK0 MStencil/s MAX'] != ''):
                k['MStencil/s  MAX'] = k['MWD main-loop RANK0 MStencil/s MAX']
        # temporary for deprecated format
        if 'RANK0 MStencil/s  MAX' in k.keys():
            if k['RANK0 MStencil/s  MAX'] != '':
                k['MStencil/s  MAX'] = k['RANK0 MStencil/s  MAX']

        # add stencil operator
        k['stencil'] = get_stencil_num(k)
        if k['stencil'] == 0:
            k['stencil_name'] = '25_pt_const'
        elif k['stencil'] == 1:
            k['stencil_name'] = '7_pt_const'
        elif k['stencil'] == 4:
            k['stencil_name'] = '25_pt_var'
        elif k['stencil'] == 5:
            k['stencil_name'] = '7_pt_var'
        elif k['stencil'] == 6:
            k['stencil_name'] = 'solar'

        # add the approach
        if (k['Time stepper orig name'] == 'Spatial Blocking'):
            k['method'] = 'Spt.blk.'
        elif (k['Time stepper orig name'] in ['PLUTO', 'Pochoir']):
            k['method'] = k['Time stepper orig name']
        elif (k['Time stepper orig name'] == 'Diamond'):
            if ('_tgs1_' in k['file_name']):
                k['method'] = '1WD'
            else:
                k['method'] = 'MWD'
        else:
            print("ERROR: Unknow time stepper")
            raise

        # add mwd type
        k['mwdt'] = 'none'
        if (k['method'] == 'MWD'):
            mwd = k['Wavefront parallel strategy'].lower()
            if ('fixed' in mwd) and ('relaxed' in mwd):
                k['mwdt'] = 'fers'
            elif ('fixed' in mwd):
                k['mwdt'] = 'fe'
            elif ('relaxed' in mwd):
                k['mwdt'] = 'rs'
            elif ('wavefront' in mwd):
                k['mwdt'] = 'block'

        # add precision information
        p = 1 if k['Precision'] in 'DP' else 0
        k['Precision'] = p

        # TLB measurement for LIKWID 4
        if 'L1 DTLB load miss rate avg' in k.keys():
            if k['L1 DTLB load miss rate avg'] != '':
                hw_ctr_fields['TLB'] = [('L1 DTLB load miss rate avg', float)]
                hw_ctr_labels['TLB'] = [('L1 DTLB load miss rate avg', 'tlb_',
                                         'tlb')]

        entry = {}
        # parse the general fields' format
        for f in req_fields + hw_ctr_fields[k['LIKWID performance counter']]:
            try:
                entry[f[0]] = map(f[1], [k[f[0]]])[0]
            except:
                print(
                    "ERROR: results entry missing essential data at file:%s" %
                    (k['file_name']))
                print f[0]
                print k
                return

        #find repeated data
        key = (entry['Precision'], k['stencil_name'],
               k['LIKWID performance counter'], k['mwdt'], k['method'],
               entry['Global NX'])
        if key not in duplicates:
            duplicates.add(key)
        else:
            print("Repeated result at: %s" % (k['file_name']))
            continue

        # Initialize the plot entry if it does not exist for the current data entry
#    for m,n in entry.iteritems(): print m,n
        measure_list = [
            'n', 'perf', 'total energy', 'tlb', 'mem bw', 'l2 bw', 'l3 bw',
            'mem vol', 'l2 vol', 'l3 vol', 'data', 'tgs', 'thx', 'thy', 'thz',
            'blk size', 'diam width', 'bs_z'
        ]
        plot_key = (entry['Precision'], k['stencil_name'],
                    k['LIKWID performance counter'])
        line_key = (k['mwdt'], k['method'])
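        # meas_figs is keyed by (precision, stencil name, LIKWID counter group)
        # at the figure level and by (mwd type, method) at the line level; each
        # line holds parallel lists of the measurements named in measure_list.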
        if plot_key not in meas_figs.keys():
            meas_figs[plot_key] = {}
        if line_key not in meas_figs[plot_key].keys():
            meas_figs[plot_key][line_key] = {meas: [] for meas in measure_list}

        # append the measurement data
        meas_figs[plot_key][line_key]['n'].append(entry['Global NX'])
        #    meas_figs[plot_key][line_key]['perf'].append(entry['MStencil/s  MAX']/1e3)
        N = entry['Global NX']**3 * entry['Number of time steps'] * entry[
            'Number of tests'] / 1e9
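        # N is the total number of lattice updates in units of 1e9 (GLUP);
        # the measured data volumes and energy are divided by N below to obtain
        # per-update values.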
        # Memory
        if k['LIKWID performance counter'] == 'MEM':
            meas_figs[plot_key][line_key]['mem bw'].append(
                entry['Sustained Memory BW'] / 1e3)
            meas_figs[plot_key][line_key]['mem vol'].append(
                entry['Total Memory Transfer'] / N)
        # Energy
        elif k['LIKWID performance counter'] == 'ENERGY':
            entry['cpu energy pj/lup'] = entry['Energy'] / N
            entry['dram energy pj/lup'] = entry['Energy DRAM'] / N
            entry['total energy pj/lup'] = entry['cpu energy pj/lup'] + entry[
                'dram energy pj/lup']
            if (entry['total energy pj/lup'] < 3e3):
                #        entry['total energy pj/lup'] = 0
                meas_figs[plot_key][line_key]['total energy'].append(
                    entry['total energy pj/lup'])
            else:
                del meas_figs[plot_key][line_key]['n'][-1]
        # TLB
        elif k['LIKWID performance counter'] == 'TLB':
            meas_figs[plot_key][line_key]['tlb'].append(
                entry[hw_ctr_fields['TLB'][0][0]])
        # L2
        elif k['LIKWID performance counter'] == 'L2':
            meas_figs[plot_key][line_key]['l2 vol'].append(
                entry['L2 data volume sum'] / N)
        #L3
        elif k['LIKWID performance counter'] == 'L3':
            meas_figs[plot_key][line_key]['l3 vol'].append(
                entry['L3 data volume sum'] / N)
        #CPU
        elif k['LIKWID performance counter'] == 'DATA':
            meas_figs[plot_key][line_key]['data'].append(
                entry['Load to Store ratio avg'])
        #Diamond tiling data
        if (k['method'] == '1WD' or k['method'] == 'MWD'):
            meas_figs[plot_key][line_key]['diam width'].append(
                int(k['Intra-diamond width']))
            meas_figs[plot_key][line_key]['tgs'].append(
                int(k['Thread group size']))
            meas_figs[plot_key][line_key]['thx'].append(
                int(k['Threads along x-axis']))
            meas_figs[plot_key][line_key]['thy'].append(
                int(k['Threads along y-axis']))
            meas_figs[plot_key][line_key]['thz'].append(
                int(k['Threads along z-axis']))
            meas_figs[plot_key][line_key]['blk size'].append(
                int(k['Total cache block size (kiB)']) / 1024.0)
            meas_figs[plot_key][line_key]['bs_z'].append(
                int(k['Multi-wavefront updates']))

        # append the performance data
        plot_key = (entry['Precision'], k['stencil_name'])
        line_key = (k['mwdt'], k['method'])
        if plot_key not in perf_fig.keys():  # figure
            perf_fig[plot_key] = dict()

        perf_line = perf_fig[plot_key]
        if line_key not in perf_line.keys():  # line
            perf_line[line_key] = dict()

        perf_point = perf_line[line_key]
        nx = entry['Global NX']
        if nx not in perf_point.keys():  # points
            perf_point[nx] = [entry['MStencil/s  MAX'] / 1e3]
        else:
            perf_point[nx].append(entry['MStencil/s  MAX'] / 1e3)

    del raw_data

    #sort performance results
    for k, v in perf_fig.iteritems():
        for k2, v2 in perf_fig[k].iteritems():
            perf_line = perf_fig[k][k2]
            perf_fig[k][k2] = OrderedDict(
                sorted(perf_fig[k][k2].iteritems(), key=lambda x: x[0]))
#  for k,v in perf_fig.iteritems():
#    print(k, "##########")
#    for k2,v2 in perf_fig[k].iteritems():
#      print(k2,v2)

    # sort the plot lines
    for p in meas_figs:
        for l in meas_figs[p]:
            pl = meas_figs[p][l]
            #remove unused fields
            empty = []
            for key, val in pl.iteritems():
                if (val == []):
                    empty.append(key)
            for key in empty:
                del pl[key]
            lines = []
            [lines.append(pl[val]) for val in measure_list if val in pl.keys()]

            lines = sorted(zip(*lines))
            idx = 0
            for val in measure_list:
                if (val in pl.keys()):
                    if (pl[val]):
                        pl[val] = [x[idx] for x in lines]
                        idx = idx + 1


#  for m,n in meas_figs.iteritems():
#    print "##############",m
#    for i,j in n.iteritems():
#      print i,j

    plot_all(perf_fig, meas_figs, machine_name)
Example no. 8
def main():
  import sys
  from scripts.utils import get_stencil_num, load_csv
  from collections import OrderedDict

  raw_data = load_csv(sys.argv[1])


  req_fields = [('MStencil/s  MAX', float), ('Precision', int), ('Global NX', int), ('Number of time steps', int), ('Number of tests', int)]

  hw_ctr_fields = {
                    '':[],
                    'TLB':[('L1 DTLB miss rate sum', float)],
                    'DATA':[('Load to Store ratio avg', float)],
                    'L2':[('L2 data volume sum', float)],
                    'L3':[('L3 data volume sum', float)],
                    'MEM':[('Total Memory Transfer', float),('Sustained Memory BW', float)],
                    'ENERGY':[('Energy', float), ('Energy DRAM', float), ('Power',float), ('Power DRAM', float)]}


  duplicates = set()
  meas_figs = dict()
  perf_fig = dict()
  for k in raw_data:

    # get processor name from the file names
    if(k['OpenMP Threads']!=''):
      if(int(k['OpenMP Threads']) == 10):
        machine_name = 'ivb10'
      elif(int(k['OpenMP Threads']) == 18):
        machine_name = 'hw18'

    # Use single field to represent the performance
    if 'Total RANK0 MStencil/s MAX' in k.keys():
      if(k['Total RANK0 MStencil/s MAX']!=''):
        k['MStencil/s  MAX'] = k['MWD main-loop RANK0 MStencil/s MAX']
    # temporary for deprecated format
    if 'RANK0 MStencil/s  MAX' in k.keys():
      if k['RANK0 MStencil/s  MAX']!='':
        k['MStencil/s  MAX'] = k['RANK0 MStencil/s  MAX']


    # add stencil operator
    k['stencil'] = get_stencil_num(k)
    if   k['stencil'] == 0:
      k['stencil_name'] = '25_pt_const'
    elif k['stencil'] == 1:
      k['stencil_name'] = '7_pt_const'
    elif k['stencil'] == 4:
      k['stencil_name']  = '25_pt_var'
    elif k['stencil'] == 5:
      k['stencil_name']  = '7_pt_var'
    elif k['stencil'] == 6:
      k['stencil_name']  = 'solar'


    # add the approach
    if(k['Time stepper orig name'] == 'Spatial Blocking'):
      k['method'] = 'Spt.blk.'
    elif(k['Time stepper orig name'] in ['PLUTO', 'Pochoir']):
      k['method'] = k['Time stepper orig name']
    elif(k['Time stepper orig name'] == 'Diamond'):
      if('_tgs1_' in k['file_name']):
        k['method'] = '1WD'
      else:
        k['method'] = 'MWD'
    else:
      print("ERROR: Unknow time stepper")
      raise

    # add mwd type
    k['mwdt']='none'
    if(k['method'] == 'MWD'):
      mwd = k['Wavefront parallel strategy'].lower()
      if('fixed' in mwd) and ('relaxed' in mwd):
        k['mwdt'] = 'fers'
      elif('fixed' in mwd):
        k['mwdt'] = 'fe'
      elif('relaxed' in mwd):
        k['mwdt'] = 'rs'
      elif('wavefront' in mwd):
        k['mwdt'] = 'block'


    # add precision information
    p = 1 if k['Precision'] in 'DP' else 0
    k['Precision'] = p


    # TLB measurement for LIKWID 4
    if 'L1 DTLB load miss rate avg' in k.keys():
      if k['L1 DTLB load miss rate avg']!='':
        hw_ctr_fields['TLB'] =  [('L1 DTLB load miss rate avg', float)]
        hw_ctr_labels['TLB'] =  [('L1 DTLB load miss rate avg', 'tlb_', 'tlb')]

    entry = {}
    # parse the general fields' format
    for f in req_fields + hw_ctr_fields[k['LIKWID performance counter']]:
      try:
        entry[f[0]] = map(f[1], [k[f[0]]] )[0]
      except:
        print("ERROR: results entry missing essential data at file:%s"%(k['file_name']))
        print f[0]
        print k
        return

    #find repeated data
    key = (entry['Precision'], k['stencil_name'], k['LIKWID performance counter'], k['mwdt'], k['method'], entry['Global NX'])
    if key not in duplicates:
      duplicates.add(key)
    else:
      print("Repeated result at: %s"%(k['file_name']))
      continue


    # Initialize the plot entry if it does not exist for the current data entry
#    for m,n in entry.iteritems(): print m,n
    measure_list = ['n', 'perf', 'total energy', 'tlb', 'mem bw', 'l2 bw', 'l3 bw', 'mem vol', 'l2 vol', 'l3 vol', 'data', 'tgs', 'thx', 'thy', 'thz', 'blk size', 'diam width', 'bs_z']
    plot_key = (entry['Precision'], k['stencil_name'], k['LIKWID performance counter'])
    line_key = (k['mwdt'], k['method'])
    if plot_key not in meas_figs.keys():
      meas_figs[plot_key] = {}
    if line_key not in meas_figs[plot_key].keys():
      meas_figs[plot_key][line_key] = {meas:[] for meas in measure_list}

    # append the measurement data
    meas_figs[plot_key][line_key]['n'].append(entry['Global NX'])
#    meas_figs[plot_key][line_key]['perf'].append(entry['MStencil/s  MAX']/1e3)
    N = entry['Global NX']**3 * entry['Number of time steps'] * entry['Number of tests']/1e9
    # Memory
    if k['LIKWID performance counter'] == 'MEM':
      meas_figs[plot_key][line_key]['mem bw'].append(entry['Sustained Memory BW']/1e3)
      meas_figs[plot_key][line_key]['mem vol'].append(entry['Total Memory Transfer']/N)
    # Energy
    elif k['LIKWID performance counter'] == 'ENERGY':
      entry['cpu energy pj/lup'] = entry['Energy']/N
      entry['dram energy pj/lup'] = entry['Energy DRAM']/N
      entry['total energy pj/lup'] = entry['cpu energy pj/lup'] + entry['dram energy pj/lup']
      if (entry['total energy pj/lup'] < 3e3):
#        entry['total energy pj/lup'] = 0
        meas_figs[plot_key][line_key]['total energy'].append(entry['total energy pj/lup'])
      else:
        del meas_figs[plot_key][line_key]['n'][-1]
    # TLB
    elif k['LIKWID performance counter'] == 'TLB':
      meas_figs[plot_key][line_key]['tlb'].append(entry[ hw_ctr_fields['TLB'][0][0] ])
    # L2
    elif k['LIKWID performance counter'] == 'L2':
      meas_figs[plot_key][line_key]['l2 vol'].append(entry['L2 data volume sum']/N)
    #L3
    elif k['LIKWID performance counter'] == 'L3':
      meas_figs[plot_key][line_key]['l3 vol'].append(entry['L3 data volume sum']/N)
    #CPU
    elif k['LIKWID performance counter'] == 'DATA':
      meas_figs[plot_key][line_key]['data'].append(entry['Load to Store ratio avg'])
    #Diamond tiling data
    if(k['method'] == '1WD' or k['method'] == 'MWD'):
      meas_figs[plot_key][line_key]['diam width'].append(int(k['Intra-diamond width']))
      meas_figs[plot_key][line_key]['tgs'].append(int(k['Thread group size']))
      meas_figs[plot_key][line_key]['thx'].append(int(k['Threads along x-axis']))
      meas_figs[plot_key][line_key]['thy'].append(int(k['Threads along y-axis']))
      meas_figs[plot_key][line_key]['thz'].append(int(k['Threads along z-axis']))
      meas_figs[plot_key][line_key]['blk size'].append(int(k['Total cache block size (kiB)'])/1024.0)
      meas_figs[plot_key][line_key]['bs_z'].append(int(k['Multi-wavefront updates']))

    # append the performance data
    plot_key = (entry['Precision'], k['stencil_name'])
    line_key = (k['mwdt'], k['method'])
    if plot_key not in perf_fig.keys(): # figure
      perf_fig[plot_key] = dict()

    perf_line = perf_fig[plot_key]
    if line_key not in perf_line.keys(): # line
      perf_line[line_key] = dict()

    perf_point = perf_line[line_key]
    nx = entry['Global NX']
    if nx not in perf_point.keys(): # points
      perf_point[nx] = [entry['MStencil/s  MAX']/1e3]
    else:
      perf_point[nx].append(entry['MStencil/s  MAX']/1e3)


  del raw_data

  #sort performance results
  for k,v in perf_fig.iteritems():
    for k2,v2 in perf_fig[k].iteritems():
      perf_line = perf_fig[k][k2]
      perf_fig[k][k2] = OrderedDict(sorted(perf_fig[k][k2].iteritems(), key=lambda x:x[0]))
#  for k,v in perf_fig.iteritems():
#    print(k, "##########")
#    for k2,v2 in perf_fig[k].iteritems():
#      print(k2,v2)


  #sort the plot lines
  for p in meas_figs:
    for l in meas_figs[p]:
      pl = meas_figs[p][l]
      #remove unused fields
      empty = []
      for key, val in pl.iteritems():
        if(val==[]):
          empty.append(key)
      for key in empty:
          del pl[key]
      lines = []
      [lines.append(pl[val]) for val in measure_list if val in pl.keys()]

      lines = sorted(zip(*lines))
      idx=0
      for val in measure_list:
        if(val in pl.keys()):
          if(pl[val]):
            pl[val] = [x[idx] for x in lines]
            idx = idx+1

#  for m,n in meas_figs.iteritems():
#    print "##############",m
#    for i,j in n.iteritems():
#      print i,j

  plot_all(perf_fig, meas_figs, machine_name)
Example no. 9
def main():
    from scripts.utils import create_project_tarball, get_stencil_num
    from scripts.conf.conf import machine_conf, machine_info
    import os, sys
    from csv import DictReader
    import time, datetime

    dry_run = 1 if len(sys.argv) < 2 else int(sys.argv[1])

    time_stamp = datetime.datetime.fromtimestamp(
        time.time()).strftime('%Y%m%d_%H_%M')
    exp_name = "pluto_thread_scaling_at_%s_%s" % (machine_info['hostname'],
                                                  time_stamp)

    tarball_dir = 'results/' + exp_name
    if (dry_run == 0): create_project_tarball(tarball_dir, "test_" + exp_name)
    target_dir = 'results/' + exp_name

    # parse the results to find out which of them already exist
    data = []
    data_file = os.path.join('results', 'summary.csv')
    try:
        with open(data_file, 'rb') as output_file:
            raw_data = DictReader(output_file)
            for k in raw_data:
                kernel = get_stencil_num(k)
                if (kernel == 0):
                    k['stencil'] = '3d25pt'
                elif (kernel == 1):
                    k['stencil'] = '3d7pt'
                elif (kernel == 4):
                    k['stencil'] = '3d25pt_var'
                elif (kernel == 5):
                    k['stencil'] = '3d7pt_var'
                else:
                    raise ValueError("unknown stencil kernel: %d" % kernel)
                data.append(k)
    except:
        pass
    param_l = dict()
    for k in data:
        try:
            param_l[(k['stencil'], int(k['Global NX']),
                     k['LIKWID performance counter'])] = ([
                         int(k['PLUTO tile size of loop 1']),
                         int(k['PLUTO tile size of loop 3']),
                         int(k['PLUTO tile size of loop 4'])
                     ], int(k['Number of time steps']))
        except:
            print k
            raise

    count = 0
    for group in ['MEM', 'L2']:
        #  for group in ['MEM', 'L2', 'L3', 'DATA', 'TLB_DATA', 'ENERGY']:
        if (machine_info['hostname'] == 'Haswell_18core'):
            machine_conf['pinning_args'] = " -m -g " + group + " -C S1:0-"
        elif (machine_info['hostname'] == 'IVB_10core'):
            if group == 'TLB_DATA': group = 'TLB'
            machine_conf['pinning_args'] = " -g " + group + " -C S0:0-"


        #    for k,v in param_l.iteritems(): print k,v
        count = count + thread_scaling_test(
            dry_run, target_dir, exp_name, param_l=param_l, group=group)

    print "experiments count =" + str(count)
Example no. 10
def parse_entry_info(k, is_tgs_only):
    from scripts.utils import get_stencil_num, load_csv

    #    # get processor name from the file names
    #    if(k['OpenMP Threads']!=''):
    #      if(int(k['OpenMP Threads']) == 10):
    #        machine_name = 'ivb10'
    #      elif(int(k['OpenMP Threads']) == 18):
    #        machine_name = 'hw18'

    # Use single field to represent the performance
    if "Total RANK0 MStencil/s MAX" in k.keys():
        if k["Total RANK0 MStencil/s MAX"] != "":
            k["MStencil/s  MAX"] = k["MWD main-loop RANK0 MStencil/s MAX"]
    # temporary for deprecated format
    if "RANK0 MStencil/s  MAX" in k.keys():
        if k["RANK0 MStencil/s  MAX"] != "":
            k["MStencil/s  MAX"] = k["RANK0 MStencil/s  MAX"]

    # add stencil operator
    k["stencil"] = get_stencil_num(k)
    if k["stencil"] == 0:
        k["stencil_name"] = "25_pt_const"
    elif k["stencil"] == 1:
        k["stencil_name"] = "7_pt_const"
    elif k["stencil"] == 4:
        k["stencil_name"] = "25_pt_var"
    elif k["stencil"] == 5:
        k["stencil_name"] = "7_pt_var"
    elif k["stencil"] == 6:
        k["stencil_name"] = "solar"

    # add the approach
    if k["Time stepper orig name"] == "Spatial Blocking":
        k["method"] = "Spt.blk."
    elif k["Time stepper orig name"] in ["PLUTO", "Pochoir"]:
        k["method"] = k["Time stepper orig name"]
    elif k["Time stepper orig name"] == "Diamond":
        if "_tgs1_" in k["file_name"]:
            k["method"] = "1WD"
        else:
            k["method"] = "MWD"
    else:
        print ("ERROR: Unknow time stepper")
        raise

    # TEMPORARY: Older versions of Pochoir did not have OpenMP threads count
    if k["method"] == "Pochoir" and k["OpenMP Threads"] == "":
        k["OpenMP Threads"] = 0

    # add mwd type
    k["mwdt"] = "none"
    if k["method"] == "MWD":
        mwd = k["Wavefront parallel strategy"].lower()
        if ("fixed" in mwd) and ("relaxed" in mwd):
            k["mwdt"] = "fers"
        elif "fixed" in mwd:
            k["mwdt"] = "fe"
        elif "relaxed" in mwd:
            k["mwdt"] = "rs"
        elif "wavefront" in mwd:
            k["mwdt"] = "block"

    # add precision information
    p = 1 if k["Precision"] in "DP" else 0
    k["Precision"] = p

    # TLB measurement for LIKWID 4
    if "L1 DTLB load miss rate avg" in k.keys():
        if k["L1 DTLB load miss rate avg"] != "":
            hw_ctr_fields["TLB"] = [("L1 DTLB load miss rate avg", float)]
            hw_ctr_labels["TLB"] = [("L1 DTLB load miss rate avg", "tlb_", "tlb")]

    # parse the general fields' format
    for f in req_fields + hw_ctr_fields[k["LIKWID performance counter"]]:
        try:
            k[f[0]] = map(f[1], [k[f[0]]])[0]
        except:
            print ("ERROR: results entry missing essential data at file:%s" % (k["file_name"]))
            print f[0]
            print k
            return

    # Parse the L2 data per thread
    if k["LIKWID performance counter"] == "L2":
        k["L2 vol list"] = []
        for i in range(k["OpenMP Threads"]):
            field = "L2 data volume core %d" % i
            if field in k.keys():
                k["L2 vol list"].append(float(k["L2 data volume core %d" % i]))

    k["tgsl"] = k["Thread group size"]
    if is_tgs_only == 0:  # regular mode for all MWD
        if k["method"] == "MWD":
            k["tgsl"] = 100