Code Example #1
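run_experiment creates a timestamped results directory, records a start timestamp, binds save_path, wf_name and pop_size into the given experiment function fnc with functools.partial, maps it over the task queue built by produce_queue, and finally appends an end timestamp.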
def run_experiment(fnc,
                   wf_name,
                   tsk_period,
                   repeat_count,
                   pop_size,
                   produce_queue=produce_queue_of_tasks):
    save_path = "../../results/[{0}]_[{1}]_[{2}by{3}]_[{4}]/".format(
        wf_name, pop_size, tsk_period, repeat_count,
        ComparisonUtility.cur_time())

    if not os.path.exists(save_path):
        print("create DIR: " + str(save_path))
        os.makedirs(save_path)

    with open(save_path + "timestamp.txt", "w") as f:
        f.write("Start: {0}".format(ComparisonUtility.cur_time()))

    ## TODO: replace it with normal ticket description
    fun = partial(fnc, save_path=save_path, wf_name=wf_name, pop_size=pop_size)
    to_exec = produce_queue(wf_name, tsk_period, repeat_count)
    # res = list(futures.map_as_completed(fun, to_exec))
    res = list(map(fun, to_exec))

    with open(save_path + "timestamp.txt", "a") as f:
        f.write("End: {0}".format(ComparisonUtility.cur_time()))
    pass
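For orientation, a minimal invocation sketch is shown below. It is not taken from the repository: my_experiment and the numeric argument values are hypothetical, and it assumes run_experiment, produce_queue_of_tasks and ComparisonUtility from the project are already importable.

def my_experiment(task, save_path, wf_name, pop_size):
    # hypothetical experiment callable; `task` is one item from produce_queue_of_tasks
    pass

# hypothetical parameter values, chosen only for illustration
run_experiment(my_experiment, wf_name="Montage_25", tsk_period=20,
               repeat_count=5, pop_size=50)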
Code Example #2
File: VersusFunctors.py  Project: fonhorst/heft
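This __call__ loads a previously saved schedule bundle for the workflow, runs the GA, HEFT + GA, and HeftREx + GA variants through run(), computes the pairwise percentage profits from the third component of each result (presumably an average metric), and packs everything into a result dict.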
    def __call__(self, wf_name):
        dax2 = '..\\..\\resources\\' + wf_name + '.xml'
        ## dedicated resources are the same for all bundles
        path = '..\\..\\resources\\saved_schedules\\' + wf_name + '_bundle' + '.json'
        bundle = Utility.load_schedule(path, Utility.readWorkflow(dax2, wf_name))

        mainCloudHEFTwithGA = partial(self.mainCloudHeft, with_ga_initial=True, the_bundle=bundle)
        mainHEFTwithGA = partial(self.mainHeft, with_ga_initial=True, the_bundle=bundle)
        mainGAwithBundle = partial(self.mainGA, the_bundle=bundle)

        resGA = run("GA", mainGAwithBundle, wf_name, self.reliability, self.n)
        resHeft = run("Heft + GA", mainHEFTwithGA, wf_name, self.reliability, self.n)
        resCloudHeft = run("HeftREx + GA", mainCloudHEFTwithGA, wf_name, self.reliability, self.n)

        pc_hg = (1 - resHeft[2]/resGA[2])*100
        pc_chg = (1 - resCloudHeft[2]/resGA[2])*100
        pc_chh = (1 - resCloudHeft[2]/resHeft[2])*100

        result = dict()
        result['wf_name'] = wf_name
        result['algorithms'] = {
            self.HEFT_REX_GA: ComparisonUtility.get_dict(resCloudHeft),
            self.GA_HEFT: ComparisonUtility.get_dict(resHeft),
            self.GA: ComparisonUtility.get_dict(resGA)
        }
        result['profit_by_avr'] = {
            "ga_heft vs ga": pc_hg,
            "ga_heft_ReX vs ga": pc_chg,
            "ga_heft_ReX vs ga_heft": pc_chh
        }
        return result
Code Example #3
File: VersusFunctors.py  Project: fonhorst/heft
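A variant that runs the HEFT and GAHEFT experiments, writes section markers and the final percentage gain of GAHEFT over HEFT to output_file, and returns the collected statistics.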
    def __call__(self, wf_name, output_file=None):
        print("Run counts: " + str(self.n))

        f = open(output_file, 'a')

        f.write("===============\n")
        f.write("=== HEFT Run\n")
        f.write("===============\n")
        resHeft = run(self.HEFT, self.mainHeft, wf_name, 1.0, f, n=self.n)
        f.write("===============\n")
        f.write("=== GAHEFT Run\n")
        f.write("===============\n")
        resGaHeft = run(self.GA_HEFT, self.mainGaHeft, wf_name, 1.0, f, n=self.n)

        pc = (1 - resGaHeft[2]/resHeft[2])*100
        f.write("GaHeft vs Heft with WF adding: " + str(pc) + '\n')
        f.close()

        result = dict()
        result['wf_name'] = wf_name
        result['algorithms'] = {
            self.HEFT: ComparisonUtility.get_dict(resHeft),
            self.GA_HEFT: ComparisonUtility.get_dict(resGaHeft)
        }
        result['profit_by_avr'] = {"GaHeft vs Heft": pc}
        return result
Code Example #4
File: VersusFunctors.py  Project: fonhorst/heft
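The simplest comparison functor: it runs HEFT and CloudHEFT at the configured reliability and reports the percentage profit of CloudHEFT over HEFT.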
    def __call__(self, wf_name):
        resHeft = run(self.HEFT, self.mainHeft, wf_name, self.reliability)
        resCloudHeft = run(self.CLOUD_HEFT, self.mainCloudHeft, wf_name, self.reliability)

        pc = (1 - resCloudHeft[2]/resHeft[2])*100
        result = dict()
        result['wf_name'] = wf_name
        result['algorithms'] = {
            self.HEFT: ComparisonUtility.get_dict(resHeft),
            self.CLOUD_HEFT: ComparisonUtility.get_dict(resCloudHeft)
        }
        result['profit_by_avr'] = {"heft_ReX vs Heft": pc}
        return result
Code Example #5
File: Utility.py  Project: fonhorst/heft
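create_jedule_visualization writes the schedule, a colour map, and a node mapping to a timestamped folder, then calls the external jedule-0.3.2.jar via subprocess to render output.png. Note the temporary os.chdir to the project root and back.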
    def create_jedule_visualization(schedule, name):
        ## TODO: fix it later.
        old_dir = os.getcwd()
        os.chdir('../../')

        folder = './resources/schedule_visualization/' + name + '_' + ComparisonUtility.cur_time()
        jed_path = folder + '/' + name + '.jed'
        cmap_path = folder + '/' + 'cmap.xml'
        node_mapping_path = folder + '/' + 'node_mapping.txt'
        output_path = folder + '/' + 'output.png'

        os.makedirs(folder)

        Utility.write_schedule_to_jed(schedule, jed_path, cmap_path,
                                      node_mapping_path)

        ## TODO: remake for *nix systems
        p = subprocess.Popen(['java', '-Xmx512M', '-jar', './jedule-0.3.2.jar', 'net.sf.jedule.JeduleStarter', \
                              '-f', jed_path, '-p', 'simgrid', '-o', output_path, '-d', '1024x768', \
                              '-cm', cmap_path])
        p.communicate()

        ## TODO: fix it later.
        os.chdir(old_dir)
        pass
Code Example #6
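A test that runs the mixed mpgaheftoldpop executor once per task id, then checks that the sandbox directory was created, that one result file per task exists, and that every JSON record matches the expected specification before removing the sandbox.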
    def test_mpgaheftoldpop_mixed_init_pop_executor(self):
        TEST_SANDBOX_DIRECTORY = "../../results/mixed_mpgaheftoldpop_[{0}]_[{1}]/".format(
            CU.cur_time(), CU.uuid())
        tsks_list = [
            "ID00000_000", "ID00005_000", "ID00010_000", "ID00015_000",
            "ID00020_000"
        ]
        for tsk in tsks_list:
            # run executor
            self.mixed_mpgaheftoldpop_func(save_path=TEST_SANDBOX_DIRECTORY,
                                           task_id_to_fail=tsk)
            pass

        # check sandbox
        assert os.path.exists(TEST_SANDBOX_DIRECTORY), "Sandbox wasn't created"

        # check generated files
        generated_files = [
            TEST_SANDBOX_DIRECTORY + entry
            for entry in os.listdir(TEST_SANDBOX_DIRECTORY)
            if os.path.isfile(TEST_SANDBOX_DIRECTORY + entry)
        ]
        assert len(generated_files) == len(tsks_list), \
            "Count of generated files ({0}) after simulations doesn't match the expected count ({1})".format(
                len(generated_files), len(tsks_list))

        # check content of generated files
        for file in generated_files:
            with open(file, "r") as f:
                data = json.load(f)
                for d in data:
                    self._check_spec(d, self.stat_specification)
                    # TODO: a single file may contain several results, but for now we don't consider that case
                    tsks_list.remove(d["task_id"])
                    pass
            pass
        assert len(tsks_list) == 0, \
            "results for the following tasks were not found: {0}".format(tsks_list)

        shutil.rmtree(TEST_SANDBOX_DIRECTORY)
        pass
Code Example #7
File: ExpRunner.py  Project: fonhorst/heft
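The same run_experiment as in Code Example #1, here attributed to ExpRunner.py in fonhorst/heft.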
def run_experiment(fnc, wf_name, tsk_period, repeat_count, pop_size, produce_queue=produce_queue_of_tasks):
    save_path = "../../results/[{0}]_[{1}]_[{2}by{3}]_[{4}]/".format(wf_name, pop_size, tsk_period, repeat_count, ComparisonUtility.cur_time())

    if not os.path.exists(save_path):
        print("create DIR: " + str(save_path))
        os.makedirs(save_path)

    with open(save_path + "timestamp.txt", "w") as f:
        f.write("Start: {0}".format(ComparisonUtility.cur_time()))

    ## TODO: replace it with normal ticket description
    fun = partial(fnc, save_path=save_path, wf_name=wf_name, pop_size=pop_size)
    to_exec = produce_queue(wf_name, tsk_period, repeat_count)
    # res = list(futures.map_as_completed(fun, to_exec))
    res = list(map(fun, to_exec))


    with open(save_path + "timestamp.txt", "a") as f:
        f.write("End: {0}".format(ComparisonUtility.cur_time()))
    pass
Code Example #8
File: Utility.py  Project: visheratin/heft
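The same create_jedule_visualization as in Code Example #5, taken from the visheratin/heft fork of the project.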
    def create_jedule_visualization(schedule, name):
        ## TODO: fix it later.
        old_dir = os.getcwd()
        os.chdir('../../')

        folder = './resources/schedule_visualization/' + name + '_' + ComparisonUtility.cur_time()
        jed_path = folder + '/' + name + '.jed'
        cmap_path = folder + '/' + 'cmap.xml'
        node_mapping_path = folder + '/' + 'node_mapping.txt'
        output_path = folder + '/' + 'output.png'

        os.makedirs(folder)

        Utility.write_schedule_to_jed(schedule, jed_path, cmap_path, node_mapping_path)

        ## TODO: remake for *nix systems
        p = subprocess.Popen(['java', '-Xmx512M', '-jar', './jedule-0.3.2.jar', 'net.sf.jedule.JeduleStarter', \
                              '-f', jed_path, '-p', 'simgrid', '-o', output_path, '-d', '1024x768', \
                              '-cm', cmap_path])
        p.communicate()

        ## TODO: fix it later.
        os.chdir(old_dir)
        pass
Code Example #9
File: GaHeftvsHeft.py  Project: fonhorst/heft
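A driver script: it builds a save path for the Montage_25 workflow, wraps the GaHeftvsHeft comparison in a ResultSaver, and runs it with a timestamped output file per workflow name.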
import os
from uuid import uuid4

from heft.experiments.comparison_experiments.common.ComparisonBase import ResultSaver, ComparisonUtility
from heft.experiments.comparison_experiments.common.VersusFunctors import GaHeftvsHeft


reliability = 0.99

wf_name = "Montage_25"

save_file_name = ComparisonUtility.build_save_path(wf_name + '\\GaHeftvsHeft_['+str(uuid4())+']')
result_saver = ResultSaver(save_file_name)
exp = GaHeftvsHeft(reliability, n=1)
def calc(wf_name, out):
    return result_saver(exp(wf_name, out))

print("fail_duration: 40")
print("reliability %s" % reliability)

base_dir = "../../resources/experiment_1/"
if not os.path.exists(base_dir):
    os.makedirs(base_dir)
output_file_template = base_dir + "[{0}]_[{1}]_[{2}].txt"
out = lambda w_name: output_file_template.format(w_name, reliability, ComparisonUtility.cur_time())

wf_names = [wf_name]

[calc(wf_name, out(wf_name)) for wf_name in wf_names]
Code Example #10
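A driver script for the GAvsHeftGA comparison: it runs the experiment for Montage_40 at reliability 0.9, stores the result through a ResultSaver, and prints it.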
from heft.experiments.comparison_experiments.common.ComparisonBase import ResultSaver, ComparisonUtility
from heft.experiments.comparison_experiments.common.VersusFunctors import GAvsHeftGA

reliability = 0.9

save_file_name = ComparisonUtility.build_save_path('CloudHeftvsHeft')
result_saver = ResultSaver(save_file_name)
exp = GAvsHeftGA(reliability)
def calc(wf_name):
    return result_saver(exp(wf_name))

print("reliability %s" % reliability)

wf_names = ["Montage_40"]

result = [calc(wf_name) for wf_name in wf_names]

print(str(result))

# result = [calc(wf_names[0]) for i in range(10)]

# avrs = [r['algorithms']["ga"]['Avr'] for r in result]

# print("Result: " + str(sum(avrs)/len(avrs)))
Code Example #11
File: FailExperiment.py  Project: fonhorst/heft
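The tail of a single-failure experiment script. The snippet starts in the middle of a results-writing loop and then prepares the workflow name, the list of task ids, the output file for the HEFT runs, and the failure coefficients to sweep.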
        f.write("\t" + str(res) + "\n")
        pass
    f.flush()
    pass

wf_name = "Montage_25"

ids = ["ID000" + (str(i) if len(str(i)) == 2 else "0" + str(i)) + "_000" for i in range(25)]

base_dir = "../../resources/singlefailexps/"

if not os.path.exists(base_dir):
    os.makedirs(base_dir)

heft_name = "HEFT"
heft_path = base_dir + "[{0}]_[{1}]_[{2}].txt".format(wf_name, heft_name, ComparisonUtility.cur_time())
heft_f = open(heft_path, "w")
heft_f.write("alg_name: " + heft_name + "\n")
heft_f.write("wf_name: " + str(wf_name) + "\n")

# gaheft_name = "GAHEFT"
# gaheft_path = base_dir + "[{0}]_[{1}]_[{2}].txt".format(wf_name, gaheft_name, ComparisonUtility.cur_time())
# gaheft_f = open(gaheft_path, "w")
# gaheft_f.write("alg_name: " + gaheft_name + "\n")
# gaheft_f.write("wf_name: " + str(wf_name) + "\n")

n = 20

## reliability 0.95 doesn't matter in this case
# for id in ids[6:7]:
failure_coeffs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
Code Example #12
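build_saver constructs a ResultSaver whose file name is assembled from an optional save_path, the key_for_save keyword argument, the current time, and a fresh UUID.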
    def build_saver(self, *args, **kwargs):
        path = kwargs.get("save_path", self.DEFAULT_SAVE_PATH)
        stat_saver = ResultSaver(path + self.DEFAULT_TEMPLATE_NAME.format(
            kwargs["key_for_save"], ComparisonUtility.cur_time(),
            ComparisonUtility.uuid()))
        return stat_saver
Code Example #13
File: ExecutorRunner.py  Project: fonhorst/heft
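The same build_saver as in Code Example #12, here attributed to ExecutorRunner.py.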
    def build_saver(self, *args, **kwargs):
        path = kwargs.get("save_path", self.DEFAULT_SAVE_PATH)
        stat_saver = ResultSaver(path + self.DEFAULT_TEMPLATE_NAME.format(kwargs["key_for_save"], ComparisonUtility.cur_time(), ComparisonUtility.uuid()))
        return stat_saver
Code Example #14
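The same experiment setup as in Code Example #11, without the leading results-writing loop.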
wf_name = "Montage_25"

ids = [
    "ID000" + (str(i) if len(str(i)) == 2 else "0" + str(i)) + "_000"
    for i in range(25)
]

base_dir = "../../resources/singlefailexps/"

if not os.path.exists(base_dir):
    os.makedirs(base_dir)

heft_name = "HEFT"
heft_path = base_dir + "[{0}]_[{1}]_[{2}].txt".format(
    wf_name, heft_name, ComparisonUtility.cur_time())
heft_f = open(heft_path, "w")
heft_f.write("alg_name: " + heft_name + "\n")
heft_f.write("wf_name: " + str(wf_name) + "\n")

# gaheft_name = "GAHEFT"
# gaheft_path = base_dir + "[{0}]_[{1}]_[{2}].txt".format(wf_name, gaheft_name, ComparisonUtility.cur_time())
# gaheft_f = open(gaheft_path, "w")
# gaheft_f.write("alg_name: " + gaheft_name + "\n")
# gaheft_f.write("wf_name: " + str(wf_name) + "\n")

n = 20

## reliability 0.95 doesn't matter in this case
# for id in ids[6:7]:
failure_coeffs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
Code Example #15
File: GeneratedDataTest.py  Project: fonhorst/heft
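The same test as in Code Example #6, here attributed to GeneratedDataTest.py.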
    def test_mpgaheftoldpop_mixed_init_pop_executor(self):
        TEST_SANDBOX_DIRECTORY = "../../results/mixed_mpgaheftoldpop_[{0}]_[{1}]/".format(CU.cur_time(), CU.uuid())
        tsks_list = ["ID00000_000", "ID00005_000", "ID00010_000", "ID00015_000", "ID00020_000"]
        for tsk in tsks_list:
            # run executor
            self.mixed_mpgaheftoldpop_func(save_path=TEST_SANDBOX_DIRECTORY, task_id_to_fail=tsk)
            pass

        # check sandbox
        assert os.path.exists(TEST_SANDBOX_DIRECTORY), "Sandbox wasn't created"

        # check generated files
        generated_files = [TEST_SANDBOX_DIRECTORY + entry for entry in os.listdir(TEST_SANDBOX_DIRECTORY) if os.path.isfile(TEST_SANDBOX_DIRECTORY + entry)]
        assert len(generated_files) == len(tsks_list), "Count of generated files ({0}) after simulations doesn't match the expected count ({1})".format(len(generated_files), len(tsks_list))

        # check content of generated files
        for file in generated_files:
            with open(file, "r") as f:
                data = json.load(f)
                for d in data:
                    self._check_spec(d, self.stat_specification)
                    # TODO: a single file may contain several results, but for now we don't consider that case
                    tsks_list.remove(d["task_id"])
                    pass
            pass
        assert len(tsks_list) == 0, "results for the following tasks were not found: {0}".format(tsks_list)


        shutil.rmtree(TEST_SANDBOX_DIRECTORY)
        pass