Example #1
def do_exp(wf_name):
    _wf = wf(wf_name)
    rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
    estimator = SimpleTimeCostEstimator(comp_time_cost=0,
                                        transf_time_cost=0,
                                        transferMx=None,
                                        ideal_flops=20,
                                        transfer_time=100)

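    # No tasks are pinned yet: every node starts with an empty fixed schedule.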
    empty_fixed_schedule_part = Schedule({node: [] for node in rm.get_nodes()})

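    # Build a baseline HEFT schedule; it is passed below as the GA's initial_schedule.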
    heft_schedule = run_heft(_wf, rm, estimator)

    ga_functions = GAFunctions2(_wf, rm, estimator)

    generate = partial(ga_generate,
                       ga_functions=ga_functions,
                       fixed_schedule_part=empty_fixed_schedule_part,
                       current_time=0.0,
                       init_sched_percent=0.05,
                       initial_schedule=heft_schedule)

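    # Collect per-generation fitness statistics (avg/std/min/max).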
    stats = tools.Statistics(lambda ind: ind.fitness.values[0])
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields

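    # Register the GA operators: generation, evaluation, crossover, mutation and selection.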
    toolbox = Toolbox()
    toolbox.register("generate", generate)
    toolbox.register(
        "evaluate",
        fit_converter(
            ga_functions.build_fitness(empty_fixed_schedule_part, 0.0)))
    toolbox.register("clone", deepcopy)
    toolbox.register("mate", ga_functions.crossover)
    toolbox.register("sweep_mutation", ga_functions.sweep_mutation)
    toolbox.register("mutate", ga_functions.mutation)
    # toolbox.register("select_parents", )
    # toolbox.register("select", tools.selTournament, tournsize=4)
    toolbox.register("select", tools.selRoulette)
    pop, logbook, best = run_ga(toolbox=toolbox,
                                logbook=logbook,
                                stats=stats,
                                **GA_PARAMS)

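    # Decode the best individual into a schedule, validate it and return its makespan.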
    resulted_schedule = ga_functions.build_schedule(best,
                                                    empty_fixed_schedule_part,
                                                    0.0)

    Utility.validate_static_schedule(_wf, resulted_schedule)

    ga_makespan = Utility.makespan(resulted_schedule)
    return ga_makespan
Example #2
def fitness_ordering_resourceconf(workflow, estimator, solution):
    os = solution[ORDERING_SPECIE]
    rcs = solution[RESOURCE_CONFIG_SPECIE]
    ## TODO: refactor this
    flops_set = [conf.flops for conf in rcs if conf is not None]
    resources = ResourceGenerator.r(flops_set)
    resource_manager = ExperimentResourceManager(resources)
    heft = DynamicHeft(workflow, resource_manager, estimator, os)
    schedule = heft.run({n: [] for n in resource_manager.get_nodes()})
    result = Utility.makespan(schedule)
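    # Fitness is the inverse of the makespan, so shorter schedules score higher.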
    return 1 / result
Example #3
def fitness_ordering_resourceconf(workflow,
                                  estimator,
                                  solution):
    os = solution[ORDERING_SPECIE]
    rcs = solution[RESOURCE_CONFIG_SPECIE]
    ## TODO: refactor this
    flops_set = [conf.flops for conf in rcs if conf is not None]
    resources = ResourceGenerator.r(flops_set)
    resource_manager = ExperimentResourceManager(resources)
    heft = DynamicHeft(workflow, resource_manager, estimator, os)
    schedule = heft.run({n: [] for n in resource_manager.get_nodes()})
    result = Utility.makespan(schedule)
    return 1/result
Example #4
def do_exp(wf_name):
    _wf = wf(wf_name)
    rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
    estimator = SimpleTimeCostEstimator(comp_time_cost=0, transf_time_cost=0,
                                        transferMx=None, ideal_flops=20,
                                        transfer_time=100)

    empty_fixed_schedule_part = Schedule({node: [] for node in rm.get_nodes()})

    heft_schedule = run_heft(_wf, rm, estimator)

    ga_functions = GAFunctions2(_wf, rm, estimator)

    generate = partial(ga_generate, ga_functions=ga_functions,
                       fixed_schedule_part=empty_fixed_schedule_part,
                       current_time=0.0, init_sched_percent=0.05,
                       initial_schedule=heft_schedule)

    stats = tools.Statistics(lambda ind: ind.fitness.values[0])
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    logbook = tools.Logbook()
    logbook.header = ["gen", "evals"] + stats.fields

    toolbox = Toolbox()
    toolbox.register("generate", generate)
    toolbox.register("evaluate", fit_converter(ga_functions.build_fitness(empty_fixed_schedule_part, 0.0)))
    toolbox.register("clone", deepcopy)
    toolbox.register("mate", ga_functions.crossover)
    toolbox.register("sweep_mutation", ga_functions.sweep_mutation)
    toolbox.register("mutate", ga_functions.mutation)
    # toolbox.register("select_parents", )
    # toolbox.register("select", tools.selTournament, tournsize=4)
    toolbox.register("select", tools.selRoulette)
    pop, logbook, best = run_ga(toolbox=toolbox,
                                logbook=logbook,
                                stats=stats,
                                **GA_PARAMS)

    resulted_schedule = ga_functions.build_schedule(best, empty_fixed_schedule_part, 0.0)

    Utility.validate_static_schedule(_wf, resulted_schedule)

    ga_makespan = Utility.makespan(resulted_schedule)
    return ga_makespan
Example #5
    def test_mapping_and_ordering(self):
        wf_name = "Montage_25"
        wf = WF(wf_name)
        manager = ExperimentResourceManager(
            ResourceGenerator.r([10, 15, 15, 25]))
        estimator = ExperimentEstimator(None, 20, 1.0, transfer_time=100)

        config = default_config(wf, manager, estimator)
        solution, pops, logbook, initial_pops = run_cooperative_ga(**config)
        schedule = build_schedule(wf, estimator, manager, solution)

        for k, items in schedule.mapping.items():
            for item in items:
                item.state = ScheduleItem.FINISHED
        ## TODO: refactor this
        ExecutorRunner.extract_result(schedule, True, wf)

        config_2 = deepcopy(config)
        config_2["interact_individuals_count"] = 4

        solution, pops, logbook, initial_pops = run_cooperative_ga(**config_2)
        schedule = build_schedule(wf, estimator, manager, solution)

        for k, items in schedule.mapping.items():
            for item in items:
                item.state = ScheduleItem.FINISHED
        ## TODO: refactor this
        ExecutorRunner.extract_result(schedule, True, wf)
        pass
Example #6
def heft_exp(saver, wf_name, **params):
    _wf = wf(wf_name)
    rm = ExperimentResourceManager(rg.r(params["resource_set"]["nodes_conf"]))
    estimator = SimpleTimeCostEstimator(**params["estimator_settings"])

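    # Run HEFT inside a dynamic executor and take the schedule it ends up with.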
    dynamic_heft = DynamicHeft(_wf, rm, estimator)
    heft_machine = HeftExecutor(rm,
                                heft_planner=dynamic_heft,
                                **params["executor_params"])
    heft_machine.init()
    heft_machine.run()
    resulted_schedule = heft_machine.current_schedule

    Utility.validate_dynamic_schedule(_wf, resulted_schedule)

    data = {
        "wf_name": wf_name,
        "params": params,
        "result": {
            "makespan":
            Utility.makespan(resulted_schedule),
            ## TODO: this function should be remade to adapt under conditions of dynamic env
            #"overall_transfer_time": Utility.overall_transfer_time(resulted_schedule, _wf, estimator),
            "overall_execution_time":
            Utility.overall_execution_time(resulted_schedule),
            "overall_failed_tasks_count":
            Utility.overall_failed_tasks_count(resulted_schedule)
        }
    }

    if saver is not None:
        saver(data)

    return data
Example #7
        def wrapper(*args, **kwargs):

            wf_name = kwargs['wf_name']
            reliability = kwargs['reliability']
            is_silent = kwargs['is_silent']

            the_bundle = kwargs.get('the_bundle', None)
            with_ga_initial = kwargs.get('with_ga_initial', False)
            logger = kwargs.get('logger', None)
            nodes_conf = kwargs.get("nodes_conf", None)

            wf = ExecutorRunner.get_wf(wf_name)
            bundle = self.get_bundle(the_bundle)
            (estimator, resource_manager,
             initial_schedule) = ExecutorRunner.get_infrastructure(
                 bundle, reliability, with_ga_initial, nodes_conf)

            nodes = kwargs.get("nodes", None)
            if nodes is not None:
                resource_manager = ExperimentResourceManager(
                    ResourceGenerator.r(nodes))

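            # Expose the prepared environment to the wrapped function through kwargs.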
            kwargs["wf"] = wf
            kwargs["estimator"] = estimator
            kwargs["resource_manager"] = resource_manager
            kwargs["initial_schedule"] = initial_schedule

            result = func(*args, **kwargs)

            if logger is not None:
                logger.flush()

            (makespan, vl1, vl2) = self.extract_result(result, is_silent, wf)
            return makespan
Example #8
    def get_infrastructure(bundle,
                           reliability,
                           with_ga_initial,
                           nodes_conf=None):

        if nodes_conf is not None:
            resources = ResourceGenerator.r(nodes_conf)
        else:
            resources = bundle.dedicated_resources

        nodes = HeftHelper.to_nodes(resources)
        reliability_map = {node.name: reliability for node in nodes}

        initial_schedule = None
        if with_ga_initial is True:
            initial_schedule = bundle.ga_schedule
            #initial_ga_makespan = Utility.get_the_last_time(initial_schedule )
            #print("Initial GA makespan: " + str(initial_ga_makespan))
            ## TODO: end

        ##======================
        ## create heft_executor
        ##======================
        estimator = ExperimentEstimator(bundle.transfer_mx, bundle.ideal_flops,
                                        reliability_map)

        resource_manager = ExperimentResourceManager(resources)
        return (estimator, resource_manager, initial_schedule)
Example #9
    def test_fixed_ordering(self):
        _wf = wf("Montage_25")
        rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
        estimator = SimpleTimeCostEstimator(comp_time_cost=0,
                                            transf_time_cost=0,
                                            transferMx=None,
                                            ideal_flops=20,
                                            transfer_time=100)
        sorted_tasks = HeftHelper.heft_rank(_wf, rm, estimator)

        heft_schedule = run_heft(_wf, rm, estimator)
        heft_mapping = schedule_to_position(heft_schedule)

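        # With probability ~0.05 seed a particle with the HEFT mapping, otherwise generate a random one.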
        heft_gen = lambda: (heft_mapping if random.random() > 0.95
                            else generate(_wf, rm, estimator))

        toolbox = Toolbox()
        # toolbox.register("generate", generate, _wf, rm, estimator)
        toolbox.register("generate", heft_gen)
        toolbox.register("fitness", fitness, _wf, rm, estimator, sorted_tasks)

        toolbox.register("force_vector_matrix", force_vector_matrix, rm)
        toolbox.register("velocity_and_position", velocity_and_position, _wf,
                         rm, estimator)
        toolbox.register("G", G)
        toolbox.register("kbest", Kbest)

        statistics = Statistics()
        statistics.register(
            "min", lambda pop: numpy.min([p.fitness.mofit for p in pop]))
        statistics.register(
            "avr", lambda pop: numpy.average([p.fitness.mofit for p in pop]))
        statistics.register(
            "max", lambda pop: numpy.max([p.fitness.mofit for p in pop]))
        statistics.register(
            "std", lambda pop: numpy.std([p.fitness.mofit for p in pop]))

        logbook = Logbook()
        logbook.header = ("gen", "G", "kbest", "min", "avr", "max", "std")

        pop_size = 100
        iter_number = 100
        kbest = pop_size
        ginit = 5

        final_pop = run_gsa(toolbox, statistics, logbook, pop_size,
                            iter_number, kbest, ginit)

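        # Take the particle with the smallest fitness (mofit) value as the best solution.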
        best = min(final_pop, key=lambda x: toolbox.fitness(x).mofit)
        solution = {
            MAPPING_SPECIE: list(zip(sorted_tasks, best)),
            ORDERING_SPECIE: sorted_tasks
        }
        schedule = build_schedule(_wf, estimator, rm, solution)
        Utility.validate_static_schedule(_wf, schedule)
        makespan = Utility.makespan(schedule)
        print("Final makespan: {0}".format(makespan))

        pass
Example #10
def do_triple_island_exp(saver, alg_builder, chromosome_cleaner_builder,
                         schedule_to_chromosome_converter_builder, wf_name,
                         **params):
    _wf = wf(wf_name)
    rm = ExperimentResourceManager(rg.r(params["resource_set"]["nodes_conf"]))
    estimator = SimpleTimeCostEstimator(**params["estimator_settings"])
    chromosome_cleaner = chromosome_cleaner_builder(_wf, rm, estimator)
    dynamic_heft = DynamicHeft(_wf, rm, estimator)

    mpga = alg_builder(_wf,
                       rm,
                       estimator,
                       params["init_sched_percent"],
                       log_book=None,
                       stats=None,
                       alg_params=params["alg_params"])

    machine = MIGaHeftExecutor(heft_planner=dynamic_heft,
                               wf=_wf,
                               resource_manager=rm,
                               estimator=estimator,
                               ga_builder=lambda: mpga,
                               chromosome_cleaner=chromosome_cleaner,
                               schedule_to_chromosome_converter=(
                                   schedule_to_chromosome_converter_builder(
                                       _wf, rm, estimator)),
                               **params["executor_params"])

    machine.init()
    machine.run()
    resulted_schedule = machine.current_schedule

    Utility.validate_dynamic_schedule(_wf, resulted_schedule)

    data = {
        "wf_name": wf_name,
        "params": params,
        "result": {
            "makespan":
            Utility.makespan(resulted_schedule),
            ## TODO: this function should be remade to adapt under conditions of dynamic env
            #"overall_transfer_time": Utility.overall_transfer_time(resulted_schedule, _wf, estimator),
            "overall_execution_time":
            Utility.overall_execution_time(resulted_schedule),
            "overall_failed_tasks_count":
            Utility.overall_failed_tasks_count(resulted_schedule)
        }
    }

    if saver is not None:
        saver(data)

    return data
Example #11
    def test_hall_of_fame(self):
        wf_name = "Montage_25"
        wf = WF(wf_name)
        manager = ExperimentResourceManager(
            ResourceGenerator.r([10, 15, 15, 25]))
        estimator = ExperimentEstimator(None, 20, 1.0, transfer_time=100)

        config = default_config(wf, manager, estimator)
        config["hall_of_fame_size"] = 3
        solution, pops, logbook, initial_pops = run_cooperative_ga(**config)
        schedule = build_schedule(wf, estimator, manager, solution)

        for k, items in schedule.mapping.items():
            for item in items:
                item.state = ScheduleItem.FINISHED
        ## TODO: refactor this
        ExecutorRunner.extract_result(schedule, True, wf)
        pass
Example #12
    def test_predefined_init_pop(self):
        ## TODO: make loading from save file too
        wf_name = "Montage_25"
        _wf = WF(wf_name)
        manager = ExperimentResourceManager(
            ResourceGenerator.r([10, 15, 15, 25]))
        estimator = ExperimentEstimator(None, 20, 1.0, transfer_time=100)

        config = default_config(_wf, manager, estimator)

        ## TODO: need to hide low level details
        import os
        path = os.path.join(
            TEMP_PATH,
            "cga_exp_for_example/5760f488-932e-4224-942f-1f5ac68709bf.json")
        with open(path, "r") as f:
            data = json.load(f)
        initial_pops = data["initial_pops"]

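        # Rebuild mapping and ordering individuals from the saved initial populations.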
        mps = [
            ListBasedIndividual([tuple(g) for g in ind])
            for ind in initial_pops[MAPPING_SPECIE]
        ]
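        # Note: this rebinds the name "os", shadowing the os module imported above.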
        os = [
            ListBasedIndividual(ind) for ind in initial_pops[ORDERING_SPECIE]
        ]

        for s in config["species"]:
            if s.name == MAPPING_SPECIE:
                s.initialize = lambda ctx, size: mps
            elif s.name == ORDERING_SPECIE:
                s.initialize = lambda ctx, size: os
            else:
                raise Exception("Unexpected specie: " + s.name)

        solution, pops, logbook, initial_pops = run_cooperative_ga(**config)
        schedule = build_schedule(_wf, estimator, manager, solution)

        for k, items in schedule.mapping.items():
            for item in items:
                item.state = ScheduleItem.FINISHED
        ## TODO: refactor this
        ExecutorRunner.extract_result(schedule, True, _wf)
        pass
Example #13
def wrap():
    ## Preparing
    ut = Utility()

    wf_name = 'Montage_25'
    dax1 = os.path.join(RESOURCES_PATH, wf_name + '.xml')
    #dax2 = '..\\resources\\CyberShake_30.xml'

    wf_start_id_1 = "00"
    task_postfix_id_1 = "00"

    wf_start_id_2 = "10"
    task_postfix_id_2 = "10"

    deadline_1 = 1000
    deadline_2 = 3000

    pipeline_1 = ut.generateUrgentPipeline(dax1,
                                           wf_name,
                                           wf_start_id_1,
                                           task_postfix_id_1,
                                           deadline_1)

    ##pipeline_2 = ut.generateUrgentPipeline(dax2,
    ##                                       wf_start_id_2,
    ##                                       task_postfix_id_2,
    ##                                       deadline_2)
    common_pipeline = pipeline_1 ## + pipeline_2

    rgen = ResourceGenerator()
    resources = rgen.generate()
    transferMx = rgen.generateTransferMatrix(resources)

    estimator = ExperimentEstimator(transferMx, 20)

    planner = StaticHeftPlanner()
    planner.estimator = estimator
    planner.resource_manager = ExperimentResourceManager(resources)
    planner.workflows = common_pipeline

    schedule = planner.schedule()

    print("planner.global_count: " + str(planner.global_count))
Example #14
def do_exp(wf_name, **params):

    _wf = wf(wf_name)

    ga_makespan, heft_make, ga_schedule, heft_sched = MixRunner()(_wf,
                                                                  **params)

    rm = ExperimentResourceManager(rg.r(params["nodes_conf"]))
    estimator = SimpleTimeCostEstimator(comp_time_cost=0,
                                        transf_time_cost=0,
                                        transferMx=None,
                                        ideal_flops=params["ideal_flops"],
                                        transfer_time=params["transfer_time"])

    heft_schedule = run_heft(_wf, rm, estimator)
    heft_makespan = Utility.makespan(heft_schedule)

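    # Gather makespan, transfer and execution times for both the GA and HEFT runs.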
    data = {
        "wf_name": wf_name,
        "params": params,
        "heft": {
            "makespan":
            heft_makespan,
            "overall_transfer_time":
            Utility.overall_transfer_time(heft_schedule, _wf, estimator),
            "overall_execution_time":
            Utility.overall_execution_time(heft_schedule)
        },
        "ga": {
            "makespan":
            ga_makespan,
            "overall_transfer_time":
            Utility.overall_transfer_time(ga_schedule, _wf, estimator),
            "overall_execution_time":
            Utility.overall_execution_time(ga_schedule)
        },
        #"heft_schedule": heft_schedule,
        #"ga_schedule": ga_schedule
    }

    return data
Example #15
    def test_fixed_specie(self):
        wf_name = "Montage_25"
        wf = WF(wf_name)
        manager = ExperimentResourceManager(
            ResourceGenerator.r([10, 15, 15, 25]))
        estimator = ExperimentEstimator(None, 20, 1.0, transfer_time=100)

        config = default_config(wf, manager, estimator)

        ms_ideal_ind = build_ms_ideal_ind(wf, manager)
        config["species"][0] = Specie(
            name=MAPPING_SPECIE,
            fixed=True,
            representative_individual=ListBasedIndividual(ms_ideal_ind))

        solution, pops, logbook, initial_pops = run_cooperative_ga(**config)
        schedule = build_schedule(wf, estimator, manager, solution)

        for k, items in schedule.mapping.items():
            for item in items:
                item.state = ScheduleItem.FINISHED
        ## TODO: refactor this
        ExecutorRunner.extract_result(schedule, True, wf)
        pass
Example #16
    def _construct_environment(self, *args, **kwargs):
        wf_name = kwargs["wf_name"]
        nodes_conf = kwargs.get("nodes_conf", None)
        ideal_flops = kwargs.get("ideal_flops", 20)
        transfer_time = kwargs.get("transfer_time", 100)

        # dax1 = '../../resources/' + wf_name + '.xml'
        # wf = Utility.readWorkflow(dax1, wf_name)

        _wf = wf(wf_name)

        rgen = ResourceGenerator(min_res_count=1,
                                 max_res_count=1,
                                 min_node_count=4,
                                 max_node_count=4,
                                 min_flops=5,
                                 max_flops=10)
        resources = rgen.generate()
        transferMx = rgen.generateTransferMatrix(resources)

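        # Fall back to the default bundle when no node configuration is supplied.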
        if nodes_conf is None:
            bundle = Utility.get_default_bundle()
            resources = bundle.dedicated_resources
            transferMx = bundle.transfer_mx
            ideal_flops = bundle.ideal_flops
            ##TODO: end
        else:
            ## TODO: refactor it later.
            resources = ResourceGenerator.r(nodes_conf)
            transferMx = rgen.generateTransferMatrix(resources)
            ##

        estimator = ExperimentEstimator(transferMx, ideal_flops, dict(),
                                        transfer_time)
        resource_manager = ExperimentResourceManager(resources)
        return (_wf, resource_manager, estimator)
Example #17
from heft.algs.ga.GAImplementation.GAImpl import GAFactory
from heft.core.CommonComponents.ExperimentalManagers import ExperimentResourceManager, ExperimentEstimator
from heft.core.environment.ResourceGenerator import ResourceGenerator
from heft.core.environment.ResourceManager import Schedule
from heft.core.environment.Utility import profile_decorator, wf

_wf = wf("Montage_100")
resources = ResourceGenerator.r([10, 15, 25, 30])
resource_manager = ExperimentResourceManager(resources)
estimator = ExperimentEstimator(None,
                                ideal_flops=20,
                                reliability=1.0,
                                transfer_time=100)

ga = GAFactory.default().create_ga(silent=True,
                                   wf=_wf,
                                   resource_manager=resource_manager,
                                   estimator=estimator,
                                   ga_params={
                                       "population": 10,
                                       "crossover_probability": 0.8,
                                       "replacing_mutation_probability": 0.5,
                                       "sweep_mutation_probability": 0.4,
                                       "generations": 20
                                   })


@profile_decorator
def fnc():
    empty_schedule = Schedule(
        {node: [] for node in resource_manager.get_nodes()})
Example #18
import random
from copy import deepcopy

from heft.algs.common.particle_operations import MappingParticle
from heft.algs.heft.DSimpleHeft import run_heft
from heft.algs.heft.HeftHelper import HeftHelper
from heft.algs.pso.mapping_operators import schedule_to_position, fitness, update, generate, construct_solution
from heft.algs.pso.sdpso import run_pso

from heft.core.CommonComponents.ExperimentalManagers import ExperimentResourceManager
from heft.core.environment.Utility import Utility, wf
from heft.algs.common.mapordschedule import build_schedule, MAPPING_SPECIE, ORDERING_SPECIE
from heft.experiments.aggregate_utilities import interval_statistics, interval_stat_string
from heft.experiments.cga.mobjective.utility import SimpleTimeCostEstimator
from heft.core.environment.ResourceGenerator import ResourceGenerator as rg
from heft.experiments.cga.utilities.common import repeat

_wf = wf("Montage_75")
rm = ExperimentResourceManager(rg.r([10, 15, 25, 30]))
estimator = SimpleTimeCostEstimator(comp_time_cost=0,
                                    transf_time_cost=0,
                                    transferMx=None,
                                    ideal_flops=20,
                                    transfer_time=100)
sorted_tasks = HeftHelper.heft_rank(_wf, rm, estimator)

heft_schedule = run_heft(_wf, rm, estimator)
heft_mapping = schedule_to_position(heft_schedule)

heft_mapping.velocity = MappingParticle.Velocity({})

heft_gen = lambda n: [
    deepcopy(heft_mapping)
    if random.random() > 1.0 else generate(_wf, rm, estimator, 1)[0]
    for _ in range(n)
]
Example #19
def do_island_inherited_pop_exp(alg_builder, mp_alg_builder, algorithm_builder,
                                chromosome_cleaner_builder,
                                schedule_to_chromosome_converter_builder,
                                wf_name, **params):
    _wf = wf(wf_name)
    rm = ExperimentResourceManager(rg.r(params["resource_set"]["nodes_conf"]))
    estimator = SimpleTimeCostEstimator(**params["estimator_settings"])
    chromosome_cleaner = chromosome_cleaner_builder(_wf, rm, estimator)
    dynamic_heft = DynamicHeft(_wf, rm, estimator)
    ga = alg_builder(_wf,
                     rm,
                     estimator,
                     params["init_sched_percent"],
                     log_book=None,
                     stats=None,
                     alg_params=params["alg_params"])

    ## TODO: remake this part later.
    # def reverse_interface_adapter(func):
    #     def wrap(*args, **kwargs):
    #         (best, pop, resulted_schedule, _), logbook = func(*args, **kwargs)
    #         return pop, logbook, best
    #     return wrap

    mpga = mp_alg_builder(
        _wf,
        rm,
        estimator,
        params["init_sched_percent"],
        algorithm=partial(algorithm_builder, **params["alg_params"]),
        #algorithm=reverse_interface_adapter(ga),
        log_book=None,
        stats=None,
        alg_params=params["alg_params"])

    kwargs = dict(params["executor_params"])
    kwargs.update(params["alg_params"])
    kwargs["ga_params"] = {"population": params["alg_params"]["n"]}

    machine = MPGaHeftOldPopExecutor(heft_planner=dynamic_heft,
                                     wf=_wf,
                                     resource_manager=rm,
                                     estimator=estimator,
                                     stat_saver=None,
                                     ga_builder=lambda: ga,
                                     mpga_builder=lambda: mpga,
                                     chromosome_cleaner=chromosome_cleaner,
                                     mpnewVSmpoldmode=False,
                                     mixed_init_pop=False,
                                     emigrant_selection=None,
                                     check_evolution_for_stopping=False,
                                     schedule_to_chromosome_converter=(
                                         schedule_to_chromosome_converter_builder(
                                             _wf, rm, estimator)),
                                     **kwargs)

    machine.init()
    machine.run()
    resulted_schedule = machine.current_schedule
    stat_data = machine.executor_stat_data

    Utility.validate_dynamic_schedule(_wf, resulted_schedule)

    data = {
        "wf_name": wf_name,
        "params": params,
        "result": {
            "makespan":
            Utility.makespan(resulted_schedule),
            ## TODO: this function should be remade to adapt under conditions of dynamic env
            #"overall_transfer_time": Utility.overall_transfer_time(resulted_schedule, _wf, estimator),
            "overall_execution_time":
            Utility.overall_execution_time(resulted_schedule),
            "overall_failed_tasks_count":
            Utility.overall_failed_tasks_count(resulted_schedule)
        }
    }

    return data
Example #20
def do_exp_heft_schedule():
    res = do_exp_schedule(True)
    return res[0]


def do_exp_ga_schedule():
    res = do_exp_schedule(False)
    return (res[0], res[4])


if __name__ == '__main__':
    print("Population size: " + str(PARAMS["ga_params"]["population"]))

    _wf = wf(wf_names[0])
    rm = ExperimentResourceManager(rg.r(PARAMS["nodes_conf"]))
    estimator = SimpleTimeCostEstimator(comp_time_cost=0,
                                        transf_time_cost=0,
                                        transferMx=None,
                                        ideal_flops=PARAMS["ideal_flops"],
                                        transfer_time=PARAMS["transfer_time"])

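    # Compute the HEFT baseline and report its makespan and time breakdown.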
    heft_schedule = run_heft(_wf, rm, estimator)
    heft_makespan = Utility.makespan(heft_schedule)
    overall_transfer = Utility.overall_transfer_time(heft_schedule, _wf,
                                                     estimator)
    overall_execution = Utility.overall_execution_time(heft_schedule)

    print(
        "Heft makespan: {0}, Overall transfer time: {1}, Overall execution time: {2}"
        .format(heft_makespan, overall_transfer, overall_execution))
Example #21
def do_inherited_pop_exp(saver, alg_builder, chromosome_cleaner_builder,
                         wf_name, **params):
    _wf = wf(wf_name)
    rm = ExperimentResourceManager(rg.r(params["resource_set"]["nodes_conf"]))
    estimator = SimpleTimeCostEstimator(**params["estimator_settings"])
    chromosome_cleaner = chromosome_cleaner_builder(_wf, rm, estimator)
    dynamic_heft = DynamicHeft(_wf, rm, estimator)

    logbook = Logbook()

    stats = tools.Statistics(lambda ind: ind.fitness.values[0])
    stats.register("avg", numpy.mean)
    stats.register("std", numpy.std)
    stats.register("min", numpy.min)
    stats.register("max", numpy.max)

    ga = alg_builder(_wf,
                     rm,
                     estimator,
                     params["init_sched_percent"],
                     log_book=logbook,
                     stats=stats,
                     alg_params=params["alg_params"])

    machine = GaHeftOldPopExecutor(heft_planner=dynamic_heft,
                                   wf=_wf,
                                   resource_manager=rm,
                                   estimator=estimator,
                                   stat_saver=None,
                                   ga_builder=lambda: ga,
                                   chromosome_cleaner=chromosome_cleaner,
                                   **params["executor_params"])

    machine.init()
    print("Executor start")
    machine.run()
    print("Executor stop")

    resulted_schedule = machine.current_schedule
    stat_data = machine.executor_stat_data

    Utility.validate_dynamic_schedule(_wf, resulted_schedule)

    data = {
        "wf_name": wf_name,
        "params": params,
        "result": {
            "random_init_logbook":
            stat_data["random_init_logbook"],
            "inherited_init_logbook":
            stat_data["inherited_init_logbook"],
            "makespan":
            Utility.makespan(resulted_schedule),
            ## TODO: this function should be remade to adapt under conditions of dynamic env
            #"overall_transfer_time": Utility.overall_transfer_time(resulted_schedule, _wf, estimator),
            "overall_execution_time":
            Utility.overall_execution_time(resulted_schedule),
            "overall_failed_tasks_count":
            Utility.overall_failed_tasks_count(resulted_schedule)
        }
    }

    if saver is not None:
        saver(data)

    return data