Example #1
def run_simulation(simulation, scenario, procs, wd):
    print("\nRunning the %s scenario ..." % (scenario.scenario_name))
    apl = []
    scenario.set_time_request_method(wd)

    apl = wd.generate_workload()
    apl = apl[scenario.get_remove_entries_count():]
    sch = ScheduleFlow.BatchScheduler(ScheduleFlow.System(procs))

    simulation.create_scenario(scenario.scenario_name, sch)
    simulation.add_applications(apl)
    ret = simulation.run()
    return ret
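
For orientation, a minimal driver for run_simulation might look like the sketch below. It only reuses names that appear elsewhere in this listing (Workload, SpeculativeSubmission, the Simulator constructor arguments), and the concrete values are arbitrary, so treat it as an illustration rather than documented usage.

# Sketch only: assumes run_simulation from the example above is in scope and
# that ScheduleFlow, Workload and SpeculativeSubmission are importable as
# they are used in the later examples of this listing.
import ScheduleFlow
import Workload
import SpeculativeSubmission

procs = 10
distr = Workload.ConstantDistr(1)               # constant runtime distribution
wd = Workload.Workload(distr, 5)                # workload with 5 jobs
scenario = SpeculativeSubmission.HPCScenario(0)
scenario.set_procs_request_method(wd, procs)    # as in the later driver code

simulation = ScheduleFlow.Simulator(loops=1, output_file_handler=None)
ret = run_simulation(simulation, scenario, procs, wd)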
Example #2
def run_scenario(num_processing_units, job_list):
    simulator = ScheduleFlow.Simulator(check_correctness=True,
                                       generate_gif=True,
                                       output_file_handler=sys.stdout)
    sch = ScheduleFlow.BatchScheduler(
        ScheduleFlow.System(num_processing_units))
    simulator.create_scenario("test_batch", sch, job_list=job_list)
    simulator.run()

    sch = ScheduleFlow.OnlineScheduler(
        ScheduleFlow.System(num_processing_units))
    simulator.create_scenario("test_online", sch, job_list=job_list)
    simulator.run()
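
A small hand-built job list is enough to exercise run_scenario; the sketch below mirrors the __main__ block at the end of this listing (Application takes the processing units, submission time, execution time, and a list of request times). The numbers are arbitrary.

# Sketch: two jobs whose request times exceed their execution times, so both
# finish on their first attempt. Assumes ScheduleFlow is already imported.
job_list = set()
job_list.add(ScheduleFlow.Application(4, 0, 3600, [4000]))
job_list.add(ScheduleFlow.Application(8, 0, 7200, [7500]))
run_scenario(10, job_list)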
Example #3
def run_simulation(simulation, scenario, procs, wd, small_jobs):
    print("\nRunning the %s scenario ..." % (scenario.scenario_name))
    apl = []
    scenario.set_time_request_method(wd)

    apl = wd.generate_workload()
    apl += small_jobs

    sch = ScheduleFlow.BatchScheduler(ScheduleFlow.System(procs))

    simulation.create_scenario(scenario.scenario_name, sch)
    simulation.add_applications(apl)
    simulation.run()
    ret = simulation.get_execution_log()
    return max([max([i[1] for i in ret[job]]) for job in ret])
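
The extra small_jobs argument is simply appended to the generated workload, so any iterable of ScheduleFlow.Application objects will do; a sketch with arbitrary values:

# Sketch: a handful of narrow, short jobs to mix into the generated workload.
# Argument order follows the __main__ block below: processing units,
# submission time, execution time, list of request times.
small_jobs = [ScheduleFlow.Application(1, 0, 600, [900]) for _ in range(5)]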
Example #4
def run_scenario(num_processing_units, job_list):
    simulator = ScheduleFlow.Simulator(check_correctness=True,
                                       generate_gif=True,
                                       output_file_handler=sys.stdout)
    sch = ScheduleFlow.BatchScheduler(
        ScheduleFlow.System(num_processing_units))
    simulator.create_scenario(sch,
                              job_list=job_list,
                              scenario_name="test_batch")
    simulator.run()

    sch = ScheduleFlow.BatchScheduler(
        ScheduleFlow.System(num_processing_units), total_queues=2)
    simulator.create_scenario(sch,
                              job_list=job_list,
                              scenario_name="test_batch")
    simulator.run()
Example #5
    def test_metrics_full(self):
        distr = Workload.ConstantDistr(1)
        wd = Workload.Workload(distr, 5)
        wd.set_processing_units(distribution=Workload.ConstantDistr(10))
        sequence = [1.5, 3]
        wd.set_request_time(request_sequence=sequence)
        apl = wd.generate_workload()
        sch = ScheduleFlow.BatchScheduler(ScheduleFlow.System(10))
        simulator = ScheduleFlow.Simulator(output_file_handler=None)
        simulator.create_scenario("test_batch", sch, job_list=apl)
        simulator.run()
        stats = simulator.stats
        self.assertEqual(stats.total_makespan(), 7 * 3600)
        self.assertAlmostEqual(stats.system_utilization(), 5 / 7, places=3)
        self.assertAlmostEqual(stats.average_job_utilization(),
                               10 / 15,
                               places=3)
        self.assertEqual(stats.average_job_wait_time(), 3 * 3600)
        self.assertEqual(stats.average_job_response_time(), 4 * 3600)
        self.assertAlmostEqual(stats.average_job_stretch(), 4, places=3)
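
Outside a unit test, the same statistics can simply be reported after a run; the sketch below reuses only the accessor methods exercised above (the assertions compare against multiples of 3600, which suggests the time-based metrics are in seconds).

# Sketch: print the metrics checked in the test above, given a simulator
# whose run() has already completed.
stats = simulator.stats
print("total makespan        :", stats.total_makespan())
print("system utilization    :", stats.system_utilization())
print("avg job utilization   :", stats.average_job_utilization())
print("avg job wait time     :", stats.average_job_wait_time())
print("avg job response time :", stats.average_job_response_time())
print("avg job stretch       :", stats.average_job_stretch())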
Example #6
    def test_metrics_failure(self):
        distr = Workload.ConstantDistr(1)
        wd = Workload.Workload(distr, 5)
        wd.set_processing_units(distribution=Workload.ConstantDistr(10))
        wd.set_request_time(request_function=self.__req_incremental_time)
        apl = wd.generate_workload()
        sch = ScheduleFlow.BatchScheduler(ScheduleFlow.System(10))
        simulator = ScheduleFlow.Simulator(output_file_handler=None)
        simulator.create_scenario("test_batch", sch, job_list=apl)
        simulator.run()
        stats = simulator.stats
        self.assertAlmostEqual(stats.total_makespan(), 28200, places=3)
        self.assertAlmostEqual(stats.system_utilization(), 0.6383, places=3)
        self.assertAlmostEqual(stats.average_job_utilization(),
                               0.6293,
                               places=3)
        self.assertEqual(stats.average_job_wait_time(), 9000)
        self.assertAlmostEqual(stats.average_job_response_time(),
                               15000,
                               places=3)
        self.assertAlmostEqual(stats.average_job_stretch(), 4.1666, places=3)
Example #7
    if arg_list['save_results'] is not None:
        outf = open(arg_list['save_results'], "a")
        outf.write("Distribution : %s : %s\n" %
                   (arg_list['distribution'], arg_list['param']))

    distr = create_workload(arg_list['distribution'], arg_list['param'],
                            arg_list['jobs'], outf)
    if distr is None:
        if arg_list['save_results'] is not None:
            outf.close()
        logger.error(
            "Distribution %s was not found in the implemented classes" %
            (arg_list['distribution']))
        exit()

    simulation_z0 = ScheduleFlow.Simulator(loops=1)
    simulation = ScheduleFlow.Simulator(generate_gif=arg_list['create_gif'],
                                        loops=1,
                                        check_correctness=False,
                                        output_file_handler=outf)
    # check_correctness is set to False because we change the request
    # time and the sequence of the jobs without using the Application
    # interface (so the internal logs are not updated)

    for loop in range(arg_list['loops']):
        wd = Workload.Workload(distr, arg_list['jobs'])
        scenario_z0 = SpeculativeSubmission.ATOptimalScenario(
            0, distr, arg_list['param'], 0)
        scenario_z0.set_procs_request_method(wd, arg_list['procs'])
        scenario_hpc = SpeculativeSubmission.HPCScenario(0)
        makespan = run_simulation(simulation_z0, scenario_z0,
                                  arg_list['procs'], wd)
Example #8
            outf.close()
        logger.error(
            "Distribution %s was not found in the implemented classes" %
            (arg_list['distribution']))
        exit()

    scenario = []
    scenario.append(SpeculativeSubmission.HPCScenario(prev_instance))
    scenario.append(SpeculativeSubmission.MaxScenario(prev_instance))
    scenario.append(
        SpeculativeSubmission.TOptimalScenario(prev_instance, distr,
                                               arg_list['param']))
    scenario.append(
        SpeculativeSubmission.UOptimalScenario(prev_instance, distr,
                                               arg_list['param']))
    if arg_list['run_neuro'] is not None:
        scenario.append(SpeculativeSubmission.NeuroScenario(prev_instance))

    simulation = ScheduleFlow.Simulator(generate_gif=arg_list['create_gif'],
                                        loops=arg_list['loops_runtime'],
                                        check_correctness=True,
                                        output_file_handler=outf)
    for loop in range(arg_list['loops']):
        wd = Workload.Workload(distr, arg_list['jobs'])
        scenario[0].set_procs_request_method(wd, arg_list['procs'])
        for temp in scenario:
            run_simulation(simulation, temp, arg_list['procs'], wd)

    if arg_list['save_results'] is not None:
        outf.close()
Example #9
    for procs in procs_list:
        if procs == "full":
            wd_large.set_processing_units(
                distribution=Workload.ConstantDistr(10))
            wd_small.set_processing_units(
                distribution=Workload.ConstantDistr(10))
        elif procs == "beta":
            wd_large.set_processing_units(procs_function=req_procs)
            wd_small.set_processing_units(procs_function=req_procs)

        outf = sys.stdout
        for sequence in scenario_list:
            #outf = open("backfill_%s_%s_%d" % (sequence.lower(),
            #                                   procs,
            #                                   num_jobs_small), "a")
            simulation = ScheduleFlow.Simulator(check_correctness=True,
                                                output_file_handler=outf)
            apl = generate_workload(sequence, procs, wd_large, wd_small,
                                    distr_large, distr_small)
            print(sequence, procs)
            print("Running the new backfilling scheme...")

            sch = SpeculativeBackfill.SpeculativeBatchScheduler(
                ScheduleFlow.System(10))
            simulation.create_scenario("speculative", sch)
            simulation.add_applications(apl)
            ret = simulation.run()

            print("Running the classic HPC backfilling scheme...")
            sch = ScheduleFlow.BatchScheduler(ScheduleFlow.System(10))
            simulation.create_scenario("classic", sch)
            simulation.add_applications(apl)
                              scenario_name="test_batch")
    simulator.run()

    sch = ScheduleFlow.BatchScheduler(
        ScheduleFlow.System(num_processing_units), total_queues=2)
    simulator.create_scenario(sch,
                              job_list=job_list,
                              scenario_name="test_batch")
    simulator.run()


if __name__ == '__main__':
    os.environ["ScheduleFlow_PATH"] = ".."
    num_processing_units = 10

    job_list = set()
    # create the list of applications
    for i in range(10):
        execution_time = np.random.randint(1800, 10000)
        request_time = execution_time + int(i / 2) * 1500
        processing_units = np.random.randint(1, num_processing_units + 1)
        submission_time = 0
        job_list.add(
            ScheduleFlow.Application(processing_units, submission_time,
                                     execution_time, [request_time]))
    # add a job that requests less time than required for its first run
    job_list.add(
        ScheduleFlow.Application(np.random.randint(9, 11), 0, 5000,
                                 [4000, 5500]))

    run_scenario(num_processing_units, job_list)
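
Instead of hand-crafting Application objects, the job list could also come from the Workload module used in the metric tests above; a sketch under that assumption (it reuses only calls shown earlier in this listing and assumes Workload is importable):

# Sketch: generate the job list from a Workload instead of building it by hand.
distr = Workload.ConstantDistr(1)
wd = Workload.Workload(distr, 10)
wd.set_processing_units(distribution=Workload.ConstantDistr(10))
wd.set_request_time(request_sequence=[1.5, 3])
job_list = wd.generate_workload()
run_scenario(num_processing_units, job_list)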