def main(qos_app, driver_params, rep):
    global SIZES
    self_pin(MAX_CORE + 1)
    for size in SIZES:
        dump_dir = 'sensitivity_data/%(qos_app)s' % locals()
        os.system('mkdir -p ' + dump_dir)
        output_base = 'sensitivity_data/%(qos_app)s/%(qos_app)s.%(size)d.%(rep)s' % locals()
        qos_data_dir = None
        bubble_proc = None
        qos_pid = None
        try:
            bubble_proc = run_bubble(size)
            qos_data_dir = driver.create_qos_app_directory()
            qos_proc, qos_pid = driver.start_and_load_qos(
                qos_app, qos_data_dir, QOS_CORES, driver_params)
            driver.run_driver(output_base, qos_app, qos_pid, DRIVER_CORES,
                              driver_params)
        finally:
            if bubble_proc is not None:
                driver.kill_process_group(bubble_proc)
            if qos_pid is not None:
                driver.kill_process_group(qos_proc)
                time.sleep(15)  # Wait for the QoS application to fully terminate
            if qos_data_dir is not None:
                driver.remove_dir(qos_data_dir)
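
# NOTE: self_pin() and run_bubble() are defined elsewhere in this script.
# For reference, a minimal self_pin() sketch -- assuming the intent is just
# to bind this Python process to a single core on Linux -- could be:
#
#     def self_pin(core):
#         os.sched_setaffinity(0, {core})  # pid 0 means the calling process
#
# The real implementation may differ (e.g. it may shell out to taskset).
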
def run_experiment(params, output_base, rep):
    """
    Old format: (suite bmark #cores)+
    New format: (suite bmark #cores)+ qos_app driver_params output_base rep
    """
    # TODO: save the benchmark results to a file and pass its name here.
    applications = []
    # Parse the experiment list already generated by "create_experiment.py":
    # each application is a (suite, bmark, #cores) triple, followed by four
    # trailing parameters.
    for i in range(int((len(params) - 4) / 3)):
        suite = params[3 * i]
        bmark = params[1 + 3 * i]
        cores = int(params[2 + 3 * i])
        applications.append([suite, bmark, cores])
    logging.info('Starting experiment with %d applications' % len(applications))

    driver_params = params[-3]
    qos_app = params[-4]

    # cab has 8 cores per socket.  The QoS app (MongoDB, interactive) uses two
    # cores on the first socket and the batch applications use the rest of it;
    # the benchmark driver (YCSB) gets two cores on the other socket (8-15).
    qos_cores = [0, 1]  # cores the QoS app will use
    starting_core = 2
    ending_core = 7
    current_core = 2
    driver_cores = [8, 9]
    mpstat_core = 14
    max_core = 15

    core_allocations = []
    for application in applications:
        cores = int(application[2])
        if current_core + cores - 1 <= ending_core:
            core_allocations.append(
                list(range(current_core, current_core + cores)))
            current_core += cores  # This is apparently important...
        else:
            # TODO: report an allocation error
            pass

    logging.info('Applications: ' + str(applications))

    # Pin "this" Python driver to the other socket; the benchmark driver
    # (YCSB, ApacheBench, etc.) is likewise pinned there to reduce
    # interference.
    self_pin(max_core)

    # Based on the already-generated unique output path, create a temporary
    # data-store path for the QoS app (in /tmp/).  Run and initialize the QoS
    # app now; it is killed at the end of the experiment, so its state does
    # not matter at this point.
    qos_proc = None
    qos_pid = None
    qos_data_dir = None
    try:
        qos_data_dir = driver.create_qos_app_directory()
        qos_proc, qos_pid = driver.start_and_load_qos(qos_app, qos_data_dir,
                                                      qos_cores, driver_params)
        ensure_data_dir(qos_app)

        experiment = ".".join([
            '%(suite)s_%(bmark)s_%(cores)d' % locals()
            for suite, bmark, cores in applications
        ])

        threads = []
        # Launch batch applications...
        for i in range(len(applications)):
            application = applications[i]
            cores = core_allocations[i]
            suite = application[0]
            bmark = application[1]
            if suite == 'parsec':
                # Note that the parameters here reflect the wrapper()
                # function defined in the decorator
                threads.append(run_parsec(add_slot(), bmark, cores))
            elif suite == 'spec_fp' or suite == 'spec_int':
                threads.append(run_spec(add_slot(), bmark, cores))
            else:
                raise Exception('Bad suite: %(suite)s' % locals())

        logging.info('Sleeping to allow batch applications to start...')
        time.sleep(30)

        # Run mpstat to measure core utilizations
        mpstat_output_filename = '%(output_base)s.mpstat' % locals()
        mpstat_proc = run_mpstat(add_slot(), mpstat_output_filename, 5)

        try:
            logging.info('Starting driver')
            driver.run_driver(output_base, qos_app, qos_pid, driver_cores,
                              driver_params)
            logging.info('Finished running driver')
        except Exception as e:
            logging.exception('Error: failed to run the driver')
            with lock:
                for key in procs:
                    if procs[key] is not None:
                        driver.kill_process_group(procs[key])
                        procs[key] = None
            for thread in threads:
                thread.join()
            sys.exit(1)

        # Kill the benchmark threads now that the main thread running the
        # driver has finished
        with lock:
            for key in procs:
                if procs[key] is not None:
                    driver.kill_process_group(procs[key])
                    procs[key] = None

        # Wait for threads to return
        for thread in threads:
            thread.join()
    except Exception as e:
        logging.exception('Problem while running experiment: ' + str(e))
    finally:
        # Now kill the QoS app as well; it will be relaunched for the next
        # experiment run.
        if qos_proc is not None and qos_proc.poll() is None:
            driver.kill_process_group(qos_proc)
            time.sleep(10)  # Wait for the QoS app to fully terminate
        # This experiment has completed, so remove the directory used for the
        # data store.
        if qos_data_dir is not None:
            driver.remove_dir(qos_data_dir)
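
# Hypothetical example of the new-format params list parsed by
# run_experiment(): two batch applications as (suite, benchmark, #cores)
# triples, followed by the QoS app name, driver parameters, output base and
# repetition id.  The concrete values below are illustrative only.
#
#     params = ['parsec', 'canneal', '2',
#               'spec_int', 'mcf', '4',
#               'mongodb', 'recordcount=1000000',
#               'results/exp1', '0']
#     run_experiment(params, 'results/exp1', '0')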