Example #1
def classify_and_eval_h_rand(test_file):
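    # Random-score baseline: attach a uniformly random L2R score to every job,
    # then simulate and report bounded-slowdown statistics.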
    def add_l2r_score_rand(infile, outfile):
        from itertools import dropwhile
        from random import randint, seed
        seed()
        with open(infile) as fin, open(outfile, "w") as fout:
            for job_str in dropwhile(swf_skip_hdr, fin):
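                # overwrite the last SWF column (the think-time field, reused
                # here to carry the score) with a random value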
                new_score_str = str(randint(0, 1000000))
                job = job_str.strip().split()
                new_job_str = ' '.join(job[:-1] + [new_score_str])
                fout.write(new_job_str + '\n')

    print "[Simulating the test file using L2R..]"
    test_l2r_fn = "%s_with_score.swf" % test_file.split('.')[0]
    add_l2r_score_rand(test_file, test_l2r_fn)

    # Simulate the file after l2r
    config["scheduler"]["name"] = 'l2r_maui_scheduler'
    config["weights"] = (0, 0, 0, 0, 0, 0, 1)
    config["input_file"] = test_l2r_fn
    config["output_swf"] = "%s_rnd_sim.swf" % test_l2r_fn.split('.')[0]
    parse_and_run_simulator(config)

    l2r_bsld = PerfMeasure(config["output_swf"])
    l2r_bsld_avg, l2r_bsld_med, l2r_bsld_max = l2r_bsld.all()
    print "+ [RND] Average Bounded-Slowdown:", l2r_bsld_avg
    print "+ [RND] Median Bounded-Slowdown:", l2r_bsld_med
    print "+ [RND] Max Bounded-Slowdown:", l2r_bsld_max

    return (l2r_bsld_avg, l2r_bsld_med, l2r_bsld_max)
Example #3
def simulate_scheduler(path, sch_name):
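    # Run one simulation of the trace under the named scheduler; returns the
    # path of the resulting SWF file.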

    config["scheduler"]["name"] = sch_name  # 'easy_sjbf_scheduler'
    config["input_file"] = path
    config["output_swf"] = rel("%s_%s.out" % (simple_name(path), sch_name))
    parse_and_run_simulator(config)
    return config["output_swf"]
Example #5
def simulate_scheduler_in_memory(path, sch_name):
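    # Like simulate_scheduler, but keeps the results in memory: the simulator
    # leaves the terminated jobs in the shared config instead of writing an SWF file.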

    config["scheduler"]["name"] = sch_name  # 'easy_sjbf_scheduler'
    config["input_file"] = path
    parse_and_run_simulator(config)
    jobs = config["terminated_jobs"]
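    # drop the reference from the shared config so it is not carried into the next run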
    del config["terminated_jobs"]
    return jobs
Example #8
def simulate(path):
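    # Sweep every candidate weight vector over the same trace, producing one
    # output SWF per setting.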
    out_swf = []
    for idx, w in enumerate(weights_options):
        config["weights"] = w
        config["input_file"] = path
        config["output_swf"] = rel("%s_%d.swf" % (simple_name(path), idx))
        parse_and_run_simulator(config)
        out_swf.append(config["output_swf"])

    return out_swf
def simulate(path):
    # Variant used with OnlineL2RMauiScheduler; `self` and `self.curr_part_num`
    # presumably come from the enclosing method's scope (this helper appears to
    # be nested inside a scheduler method).
    config = OnlineL2RMauiScheduler.config
    out_swf = []
    for idx, w in enumerate(OnlineL2RMauiScheduler.weights_options):
        config["weights"] = w
        config["input_file"] = path
        config["output_swf"] = "%s_%d_%d.swf" % (path.split('.')[0], self.curr_part_num, idx)
        parse_and_run_simulator(config)
        out_swf.append(config["output_swf"])
        # out_swf.append(config["terminated_jobs"])
    # gc.collect() # to ensure that the output files are closed
    return out_swf

def simulate(path):
    # Same sweep for AdaptiveMauiScheduler, but each result is recorded as a
    # (weights, output_swf) pair.
    config = AdaptiveMauiScheduler.config
    out_swf = []
    for idx, w in enumerate(AdaptiveMauiScheduler.weights_options):
        config["weights"] = w
        config["input_file"] = path
        config["output_swf"] = "%s_%d_%d.swf" % (path.split('.')[0], self.curr_part_num, idx)
        parse_and_run_simulator(config)
        out_swf.append((w, config["output_swf"]))
        # out_swf.append(config["terminated_jobs"])
    # gc.collect() # to ensure that the output files are closed
    return out_swf
Example #12
def launchExpe(options, worker_id):
    with expe_counter.get_lock():
        expe_counter.value += 1
        myid = expe_counter.value

    #if not ( os.path.isfile(options["output_swf"]) ):
    print bcolors.WARNING + "Start expe " + str(myid) + " on w" + str(
        worker_id) + bcolors.ENDC + " : " + str(options)
    error = False
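    # capture the simulator's console output in a per-experiment log file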
    tempout = sys.stdout
    sys.stdout = open(options["output_swf"] + ".out", 'w')
    sys.stderr = sys.stdout
    try:
        parse_and_run_simulator(options)
    except Exception, e:
        print "Exception: " + str(e)
        error = str(e)
    # restore the console streams and report the outcome
    sys.stdout = tempout
    sys.stderr = sys.__stderr__
    if not error:
        print bcolors.OKBLUE + "End   expe " + str(myid) + " on w" + str(worker_id) + bcolors.ENDC
    else:
        print bcolors.FAIL + "ERROR on " + str(myid) + ": " + error + bcolors.ENDC
Example #13
def launchExpe(options):
    myid = next_id()

    if not (skip_always_done and os.path.isfile(options["output_swf"])):
        print bcolors.WARNING + "Start expe " + str(myid) + bcolors.ENDC + " : " + str(options)
        error = False
        tempout = sys.stdout
        sys.stdout = open(options["output_swf"] + ".out", 'w')
        sys.stderr = sys.stdout
        try:
            parse_and_run_simulator(options)
        except Exception, e:
            print "Exception: " + str(e)
            error = str(e)
        sys.stdout = tempout
        if not error:
            print bcolors.OKBLUE + "End   expe " + str(myid) + bcolors.ENDC
        else:
            print bcolors.FAIL + "ERROR on " + str(myid) + ": " + error + bcolors.ENDC
Example #14
def classify_and_eval_h(test_file):

    global min_max
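    # End-to-end evaluation: featurize the test trace, score it with the
    # trained model, inject the scores, simulate, and report bounded slowdowns.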

    print "[Creating the ML testing file..]"
    mat = extract_columns(test_file, indices)
    mat = normalize_mat(mat, min_max)
    features = convert_to_ml_format(mat, 0)

    write_lines_to_file(rel(test_fn), features)

    print "[Classifying/Testing..]"
    batchL2Rlib.classify(test_fn, model_fn, score_fn)
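    # score_fn now holds the model's scores (presumably one per test row),
    # consumed by add_l2r_score below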

    print "[Simulating the test file using L2R..]"
    # add the L2R score to the test log as the think-time field
    test_l2r_fn = "%s_with_score.swf" % test_file.split('.')[0]
    add_l2r_score(test_file, rel(score_fn), test_l2r_fn)

    # Simulate the file after l2r
    config["scheduler"]["name"] = 'l2r_maui_scheduler'
    config["weights"] = (0, 0, 0, 0, 0, 0, 1)
    config["input_file"] = test_l2r_fn
    config["output_swf"] = "%s_sim.swf" % test_l2r_fn.split('.')[0]
    parse_and_run_simulator(config)

    l2r_bsld = PerfMeasure(config["output_swf"])
    l2r_bsld_avg, l2r_bsld_med, l2r_bsld_max = l2r_bsld.all()
    print "+ [L2R] Average Bounded-Slowdown:", l2r_bsld_avg
    print "+ [L2R] Median Bounded-Slowdown:", l2r_bsld_med
    print "+ [L2R] Max Bounded-Slowdown:", l2r_bsld_max
    #print "+ [L2R] System Utilization:", compute_utilisation(config["output_swf"])

    res_data.extend(
        [str(u) for u in (l2r_bsld_avg, l2r_bsld_med, l2r_bsld_max)])
    res_data.append(' ')

    return (l2r_bsld_avg, l2r_bsld_med, l2r_bsld_max)
#! /usr/bin/env python2

from run_simulator import parse_and_run_simulator
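# Compare two EASY-backfilling variants on the CEA Curie sample log, writing
# one result trace per scheduler.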

options = {
    "scheduler": {
        "name": 'TBD'
    },
    "input_file": '../../../data/CEA-curie_sample/original_swf/log.swf',
    "num_processors": 80640,
    "stats": False,
    "output_swf": 'TBD'
}

for sched in ('easy_backfill_scheduler', "easy_sjbf_scheduler"):
    options["scheduler"]["name"] = sched
    options["output_swf"] = "res_" + sched + ".swf"
    parse_and_run_simulator(options)
Example #17
        sim_statistics(log_path, 'online_l2r_maui_scheduler')

    # simulate with all the schedulers
    if arguments["-a"]:
        sim_statistics(log_path, 'adaptive_maui_scheduler')

    # simulate with diff weights
    if arguments["-w"]:
        if redirect_sim_output: sys.stdout = open('/dev/null', 'w')

        vals = []
        try:
            for w in weights_options:
                config["weights"] = w
                config["input_file"] = log_path
                parse_and_run_simulator(config)
                sim_jobs = config["terminated_jobs"]
                del config["terminated_jobs"]
                obj_fun_val = PerfMeasure(jobs=sim_jobs).average()
                vals.append((w, obj_fun_val))
                print >> sys.__stdout__, vals[-1]
        except:
            # a failure mid-sweep aborts the remaining weights but keeps the
            # results gathered so far
            pass
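        # rank the weight settings by objective value and print the ten best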
        vals.sort(key=lambda u: u[1])
        print >> sys.__stdout__
        for i in range(min(10, len(vals))):
            print >> sys.__stdout__, vals[i]

        if redirect_sim_output: sys.stdout = sys.__stdout__

    # train offline