Code Example #1
File: peft.py  Project: wzwtime/DAG
 def __init__(self, v_, q_, n_):
     """n is which dag"""
     self.heft = heft_new.Heft(q_, n_, v_, 0)
     self.pred = self.heft.pred_list()
     self.pred.sort(key=operator.itemgetter(0), reverse=False)
     self.computation_costs = self.heft.computation_costs
     self.dag = self.heft.dag
     # self.dag = {}
     # self.computation_costs = []
     self.v = v_
     self.q = q_
     self.n = n_
     self.M = 10000  # large constant used as an upper bound ("infinity")
     # self.pred = []
     self.oct_table = {}
     self.rank_oct = []
     self.rank_oct_copy = []
     self.ready_list = []
     self.Pi = {}
     self.scheduler = []
     self.makespan = 0
     self.start_time = 0
     self.end_time = 0
     self.running_time = 0
     self.Tlevel = []
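
For context, the oct_table and rank_oct fields initialized above correspond to PEFT's Optimistic Cost Table (Arabnejad & Barbosa, 2014): OCT(t, p) is the best-case cost of finishing all descendants of t if t runs on processor p, and rank_oct(t) averages OCT(t, p) over processors. The sketch below is a minimal standalone version under an illustrative data layout (succ maps task -> successors, w[t][p] is an execution cost, c[(i, j)] the average communication cost of edge i -> j); it is not tied to this project's Heft class.

def build_oct_table(succ, w, c, procs):
    # Memoized bottom-up computation of OCT(t, p).
    memo = {}

    def oct_value(t, p):
        if (t, p) not in memo:
            if not succ.get(t):  # exit task: no descendants remain
                memo[(t, p)] = 0.0
            else:
                memo[(t, p)] = max(
                    min(oct_value(s, pw) + w[s][pw]
                        + (0.0 if pw == p else c[(t, s)])  # no comm cost on the same processor
                        for pw in procs)
                    for s in succ[t])
        return memo[(t, p)]

    for t in succ:
        for p in procs:
            oct_value(t, p)
    return memo

def rank_oct(t, oct_table, procs):
    # rank_oct is the mean OCT of a task across all processors.
    return sum(oct_table[(t, p)] for p in procs) / len(procs)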
Code Example #2
 def __init__(self, q, n, v):
     """"""
     self.heft = heft_new.Heft(q, n, v)
     self.pred = self.heft.pred_list()
     self.computation_costs = self.heft.computation_costs
     self.dag = self.heft.dag
     self.q = q
     self.n = n
     self.v = v
     self.Pi = {}
     self.rank_d = []
     self.scheduler = []  # processor number assigned to each scheduled task
     self.priority = []
     self.cp = 0  # the priority of the entry task
     self.set_cp = [1]  # set of nodes with the same priority as the entry node
     self.rank_u_copy = self.heft.rank_u_copy
     self.label_pi = 0
     self.cp_min_costs = 0
     self.start_time = 0
     self.end_time = 0
     self.running_time = 0
     self.slr = 0
     self.speedup = 0
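
For reference, cp and set_cp follow the standard CPOP rule: each task gets priority rank_u(t) + rank_d(t), and every task whose priority equals that of the entry task lies on a critical path. A tiny worked sketch with illustrative rank values (not data from this project):

rank_u = [100.0, 77.0, 80.0]   # illustrative upward ranks
rank_d = [0.0, 23.0, 15.0]     # illustrative downward ranks
priority = [u + d for u, d in zip(rank_u, rank_d)]   # [100.0, 100.0, 95.0]
cp = priority[0]               # the priority of the entry task
set_cp = [i + 1 for i, p in enumerate(priority) if abs(p - cp) < 1e-9]
# -> [1, 2]: tasks 1 and 2 share the entry task's priority, so they form the critical path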
Code Example #3
File: init_population.py  Project: wzwtime/DAG
    def __init__(self, popsize, v_, q_, n_):
        self.init_individuals = []
        self.Population = []
        """HEFT"""
        self.heft = heft_new.Heft(q_, n_, v_, 0)
        self.computation_costs = self.heft.computation_costs
        self.dag = self.heft.dag
        self.heft_make_span = self.heft.heft()
        self.pred = self.heft.pred
        self.Blevel = self.heft.Blevel
        self.init_individuals.append(self.Blevel)
        self.Population.append(self.Blevel)
        """CPOP"""
        self.cpop = cpop_new.Cpop(q_, n_, v_)
        self.cpop_make_span = self.cpop.cpop()
        self.Llevel = self.cpop.Llevel
        self.init_individuals.append(self.Llevel)
        self.Population.append(self.Llevel)
        """PEFT"""
        self.peft = peft.Peft(v_, q_, n_)
        self.peft_make_span = self.peft.peft()
        self.Tlevel = self.peft.Tlevel
        self.init_individuals.append(self.Tlevel)
        self.Population.append(self.Tlevel)

        self.v = v_

        self.PopSize = popsize
        self.PopulationN = 3  # number of heuristic seed individuals: HEFT, CPOP, PEFT
Code Example #4
    def reset(self, v, Q, t):
        """"""
        # self.dag = heft.dag
        # self.computation_costs = heft.computation_costs
        self.heft = heft_new.Heft(Q, t, v)
        self.dag = self.heft.read_dag()
        self.computation_costs = self.heft.read_computation_costs()
        self.pred = self.heft.pred_list()
        # self.heft.heft()

        self.v = v  # v is the number of nodes
        self.rank_u = copy.deepcopy(self.rank_u_copy)  # restore rank_u from the saved copy
        self.rank_u_copy = copy.deepcopy(self.rank_u)  # refresh the copy for the next reset
        # self.pred = heft.pred_list()

        self.q = len(self.computation_costs[0])  # the number of processors
        self.state = []  # layout: [remaining tasks, est per processor, entry-task cost per processor]
        self.state.append(self.v)
        for index in range(self.q):
            self.state.append(0)  # earliest start time on each processor
        for index in range(self.q):
            self.state.append(self.computation_costs[0][index])
        self.next_state_ = []  # [number of remaining tasks n, est, wij]
        self.scheduler = []  # task-to-processor assignments, e.g. [{1: 3}, {4: 1}, {3: 2}, ...]
        self.Pi = {}  # the task information on processors

        self.reward_ = []
        self.makespan = 0
        return self.state
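
The state built in reset has length 1 + 2q: the remaining-task count, one earliest-start slot per processor, and the entry task's cost on each processor. This matches the np.reshape(state, [1, 1 + 2 * Q]) calls in the training loops below. A quick worked example with illustrative numbers:

v, q = 20, 2                      # 20 tasks, 2 processors (illustrative)
computation_costs = [[14, 16]]    # entry task's cost on each processor
state = [v] + [0] * q + computation_costs[0]
print(state)                      # [20, 0, 0, 14, 16] -- length 1 + 2*q == 5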
Code Example #5
def write(q, v, N):
    n = 1
    while n <= N:
        heft = heft_new.Heft(q, n, v)
        heft_makespan = heft.heft()
        min_costs = heft.min_costs

        cpop = cpop_new.Cpop(q, n, v)
        cpop_makespan = cpop.cpop()
        cp_min_costs = cpop.cp_min_costs

        slr_heft = round(1.0 * heft_makespan / cp_min_costs, 4)
        # slr_cpop = round(1.0 * cpop_makespan / cp_min_costs, 4)
        slr_cpop = cpop.slr

        # speedup_heft = round(1.0 * min_costs / heft_makespan, 4)
        speedup_heft = heft.speedup
        speedup_cpop = round(1.0 * min_costs / cpop_makespan, 4)

        running_time_heft = heft.running_time
        running_time_cpop = cpop.running_time

        # print("time", running_time_heft, running_time_cpop)

        def write_slr_speedup():
            """computing schedule length ratio"""
            file_path = "performance/v=" + str(v) + "/"
            filename = file_path + "slr_speedup_heft_cpop.txt"
            file_dir = os.path.split(filename)[0]
            if not os.path.isdir(file_dir):
                os.makedirs(file_dir)
            with open(filename, 'a') as file_object:
                fields = [v, min_costs, cp_min_costs, heft_makespan,
                          cpop_makespan, slr_heft, slr_cpop, speedup_heft,
                          speedup_cpop, running_time_heft, running_time_cpop]
                file_object.write("  ".join(str(x) for x in fields) + "\n")

        write_slr_speedup()

        print(slr_heft, slr_cpop, speedup_heft, speedup_cpop)
        n += 1
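
The metrics written above follow the usual definitions, visible in the expressions for slr_heft and speedup_cpop: schedule length ratio SLR = makespan / (minimum cost summed along the critical path), and speedup = (best sequential execution time) / makespan. A standalone sketch with hypothetical helper names:

def schedule_length_ratio(makespan, cp_min_costs):
    # SLR compares the achieved makespan to the critical-path
    # lower bound; values closer to 1.0 are better.
    return round(1.0 * makespan / cp_min_costs, 4)

def speedup(min_costs, makespan):
    # Ratio of the best sequential time to the parallel makespan.
    return round(1.0 * min_costs / makespan, 4)

# Illustrative values only:
print(schedule_length_ratio(120.0, 80.0))  # 1.5
print(speedup(300.0, 120.0))               # 2.5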
Code Example #6
File: mpq_ga.py  Project: wzwtime/DAG
 def task_to_processor_mapping_fitness(self, q_, n_, v_):
     max_makespan = 0
     makespan_ = []
     for t in range(self.PopSize):
         rank_u = self.Poputation[t]
         new_rank = []
         # print("rank_u =", rank_u)
         for item in range(v_):
             temp_rank = [rank_u[item], 0]
             new_rank.append(temp_rank)
         # print("t =", t, "rank_u =", rank_u)
         heft = heft_new.Heft(q_, n_, v_, new_rank)
         heft_make_span = heft.heft()
         if max_makespan < heft_make_span:
             max_makespan = heft_make_span
         makespan_.append(heft_make_span)
     # print("max_makespan =", max_makespan)
     self.makespan = max_makespan
     # print("makespan =", makespan)
     """evaluate the fitness (makespan)"""
     for m in range(self.PopSize):
         self.fitness.append(max_makespan - makespan_[m] + 1)
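
The fitness transform at the end inverts makespan so that shorter schedules receive larger, strictly positive fitness (the +1 keeps even the worst individual above zero, which matters for proportional selection). A worked example with illustrative makespans:

makespans = [10, 14, 12]                        # illustrative values
max_makespan = max(makespans)                   # 14
fitness = [max_makespan - m + 1 for m in makespans]
print(fitness)   # [5, 1, 3] -- the shortest schedule scores highest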
Code Example #7
File: performance_new.py  Project: wzwtime/DAG
def write(q, v, N, nn):
    n = 1
    while n <= N:
        heft = heft_new.Heft(q, n, v, 0)
        heft_makespan = heft.heft()
        min_costs = heft.min_costs
        """Speedup-HEFT"""
        speedup_heft = heft.speedup

        cpop = cpop_new.Cpop(q, n, v)
        cpop_makespan = cpop.cpop()
        cp_min_costs = cpop.cp_min_costs

        peft_ = peft.Peft(v, q, n)
        peft_makespan = peft_.peft()

        PopSize = v
        g = 0
        mpq_ga_ = mpq_ga.MPQGA(PopSize)
        ga_makespan = mpq_ga_.mpqga(v, q, n, g)
        """slr"""
        slr_heft = round(1.0 * heft_makespan / cp_min_costs, 4)
        slr_cpop = cpop.slr
        slr_peft = round(1.0 * peft_makespan / cp_min_costs, 4)
        slr_ga = round(1.0 * ga_makespan / cp_min_costs, 4)
        """Speedup"""

        print("+++++++++speedup_heft= ", speedup_heft)
        speedup_cpop = round(1.0 * min_costs / cpop_makespan, 4)
        speedup_peft = round(1.0 * min_costs / peft_makespan, 4)
        speedup_ga = round(1.0 * min_costs / ga_makespan, 4)
        """Running Time"""
        time_heft = heft.running_time
        time_cpop = cpop.running_time
        time_peft = peft_.running_time
        time_ga = mpq_ga_.running_time

        # print("time", running_time_heft, running_time_cpop)

        def write_slr_speedup_time(nn):
            """computing schedule length ratio"""
            # Date components are hard-coded here; the commented lines
            # derive them from the current time instead.
            # mon = time.localtime(time.time())[1]
            # day = time.localtime(time.time())[2]
            mon = 7
            day = 18

            file_path = "performance/" + str(mon) + "_" + str(day) + "/v=" + str(v) + "_PopSize=" + \
                        str(int(PopSize/nn)) + "/"
            filename = file_path + "slr_speedup_heft_cpop.txt"
            file_dir = os.path.split(filename)[0]
            if not os.path.isdir(file_dir):
                os.makedirs(file_dir)
            with open(filename, 'a') as file_object:
                fields = [v, q, min_costs, cp_min_costs, heft_makespan,
                          cpop_makespan, peft_makespan, slr_heft, slr_cpop,
                          slr_peft, speedup_heft, speedup_cpop, speedup_peft,
                          time_heft, time_cpop, time_peft, ga_makespan,
                          slr_ga, speedup_ga, time_ga]
                file_object.write("  ".join(str(x) for x in fields) + "\n")

        write_slr_speedup_time(nn)
        print("---------------------", n)
        print("slr =", slr_heft, slr_cpop, slr_peft, slr_ga)
        print("speedup =", speedup_heft, speedup_cpop, speedup_peft,
              speedup_ga)
        print("time =", time_heft, time_cpop, time_peft, time_ga)
        n += 1
Code Example #8
def cyclic(N, Q, V, N1, N2, discount_factor, lr):
    """N is the number of DAG, Q is the number of processors"""
    t = 1
    while t <= N:
        heft = heft_new.Heft(Q, t, V)
        heft_make_span = heft.heft()
        num_task = heft.v
        env = SchedEnv(num_task, Q, t)
        agent = REINFORCE(Q, N1, N2, discount_factor, lr)
        # print(env.dag, heft_make_span)

        global_step = 0
        scores, episodes = [], []
        make_spans = []

        for e in range(EPISODES):
            done = False
            score = 0
            # fresh env
            state = env.reset(num_task, Q, t)
            state = np.reshape(state, [1, 1 + 2 * Q])

            while not done:
                global_step += 1
                # get action for the current state and go one step in environment
                action = agent.get_action(state) + 1

                next_state, reward, done = env.step(action)
                if len(next_state) != 1:
                    next_state = np.reshape(next_state, [1, 1 + 2 * Q])

                agent.append_sample(state, action, reward)
                score += reward
                state = copy.deepcopy(next_state)

                if done:
                    # update policy neural network for each episode

                    agent.train_model()
                    scores.append(score)
                    make_spans.append(env.makespan)
                    episodes.append(e)

                    score = round(score, 2)
                    # print("episode:", e, "  score:", score, "  time_step:", global_step)
                    if e % 10 == 0:
                        # score is the cumulative reward, i.e. the negative makespan
                        print("episode:", e, "  score:", score,
                              "  time_step:", global_step)

            if e % 10 == 0:

                pylab.plot(episodes, scores, 'b')

                pylab.xlabel("Episode")
                pylab.ylabel("- Makespan")
                # pylab.plot(episodes, make_spans, 'b')
                # pylab.axhline(-heft.make_span, linewidth=0.5, color='r')
                pylab.axhline(-heft_make_span, linewidth=0.5, color='r')
                """"""
                n, ccr, alpha, beta, q = read_parameter(Q, t, V)

                info = str(N1) + "*" + str(N2) + " ccr=" + str(
                    ccr) + " alpha=" + str(alpha) + " beta=" + str(beta)
                pylab.title(info)
                info1 = "_ccr=" + str(ccr) + " alpha=" + str(
                    alpha) + " beta=" + str(beta)
                save_path = "./save_graph/v=" + str(V) + "q=" + str(Q) + "/"
                save_name = save_path + "_" + str(t) + info1 + ".png"
                file_dir = os.path.split(save_name)[0]
                if not os.path.isdir(file_dir):
                    os.makedirs(file_dir)
                pylab.savefig(save_name)
                pylab.close()
                if N >= 50:
                    agent.model.save_weights(
                        "./save_model/REINFORCE_trained_v=" + str(V) + "q=" +
                        str(q) + "_" + str(N1) + "_" + str(N2) + ".h5")

        t += 1
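
A hypothetical invocation, matching the docstring's parameter meanings (all concrete values below are illustrative, not from the project):

# Train on 10 DAGs of 20 tasks each, scheduled on 4 processors,
# with two 64-unit hidden layers in the REINFORCE policy network.
cyclic(N=10, Q=4, V=20, N1=64, N2=64, discount_factor=0.99, lr=0.001)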
Code Example #9
            self.state.append(self.computation_costs[0][index])
        self.next_state_ = []  # [number of remaining tasks n, est, wij]
        self.scheduler = []  # task-to-processor assignments, e.g. [{1: 3}, {4: 1}, {3: 2}, ...]
        self.Pi = {}  # the task information on processors

        self.reward_ = []
        self.makespan = 0
        return self.state


if __name__ == "__main__":
    Q = 4
    n = 1
    V = 20
    heft = heft_new.Heft(Q, n, V)
    heft.heft()

    env = SchedEnv(V, Q, n)
    print(env.state)

    print(env.dag)
    print(env.computation_costs)
    print(env.pred)
    print(env.rank_u)

    # num_pi = len(heft.computation_costs[0])
    num_pi = Q
    done = False
    e = 0
    makespans = []
Code Example #10
File: reinforce.py  Project: wzwtime/DAG
def cyclic(N, Q, V, N1, N2, discount_factor, lr):
    """N is the number of DAG, Q is the number of processors"""
    t = 14  # note: iteration starts at DAG 14 rather than 1

    while t <= N:
        heft = heft_new.Heft(Q, t, V)
        heft_make_span = heft.heft()

        cpop = cpop_new.Cpop(Q, t, V)
        cpop.cpop()
        cp_min_costs = cpop.cp_min_costs
        min_costs = heft.min_costs

        env = SchedEnv(V, Q, t)
        agent = REINFORCE(Q, N1, N2, discount_factor, lr)
        global_step = 0
        scores, episodes = [], []
        make_spans = []
        count = 0
        best_makespan = 10000  # large sentinel; any real makespan is smaller

        for e in range(EPISODES):
            done = False
            score = 0
            # fresh env
            state = env.reset(V, Q, t)
            state = np.reshape(state, [1, 1 + 2 * Q])
            rein_start_time = time.time()
            rein_end_time = 0

            while not done:
                global_step += 1
                # get action for the current state and go one step in environment
                action = agent.get_action(state) + 1

                next_state, reward, done = env.step(action)
                if len(next_state) != 1:
                    next_state = np.reshape(next_state, [1, 1 + 2 * Q])

                agent.append_sample(state, action, reward)
                score += reward
                state = copy.deepcopy(next_state)

                if done:
                    # update policy neural network for each episode
                    rein_end_time = time.time()

                    agent.train_model()
                    scores.append(score)
                    make_spans.append(env.makespan)
                    episodes.append(e)

                    score = round(score, 2)
                    # print("episode:", e, "  score:", score, "  time_step:", global_step)

                    # record good score
                    if (score > -heft_make_span) and (best_makespan > -score):
                        best_makespan = -score

                    if e % 10 == 0:
                        # score is the cumulative reward, i.e. the negative makespan
                        print("episode:", e, "  score:", score,
                              "  time_step:", global_step)

            if e % 10 == 0:

                pylab.plot(episodes, scores, 'b')

                pylab.xlabel("Episode")
                pylab.ylabel("- Makespan")
                pylab.axhline(-heft_make_span, linewidth=0.5, color='r')
                """"""
                n, ccr, alpha, beta, q = read_parameter(Q, t, V)

                info = str(N1) + "*" + str(N2) + " ccr=" + str(
                    ccr) + " alpha=" + str(alpha) + " beta=" + str(beta)
                pylab.title(info)
                info1 = "_ccr=" + str(ccr) + " alpha=" + str(
                    alpha) + " beta=" + str(beta)

                mon = time.localtime(time.time())[1]
                day = time.localtime(time.time())[2]

                save_path = "./save_graph/" + str(mon) + "_" + str(day) + "/v=" + str(V) + "q=" + str(Q) + "n1=n2=" \
                            + str(N1) + "d=" + str(discount_factor) + "lr=" + str(lr)+"/"

                save_name = save_path + "_" + str(t) + info1 + ".png"
                file_dir = os.path.split(save_name)[0]
                if not os.path.isdir(file_dir):
                    os.makedirs(file_dir)
                pylab.savefig(save_name)
                """"save the better performance graph"""
                if score > -heft_make_span:
                    # count += 1
                    save_path1 = "./save_graph/good/" + str(mon) + "_" + str(day) + "/v=" + str(V) + "q=" + str(Q) \
                                 + "n1=n2=" + str(N1) + "d=" + str(discount_factor) + "lr=" + str(lr) + "/"
                    save_name = save_path1 + "_" + str(t) + info1 + ".png"
                    file_dir = os.path.split(save_name)[0]
                    if not os.path.isdir(file_dir):
                        os.makedirs(file_dir)
                    pylab.savefig(save_name)
                """
                if count > EPISODES * 0.005:
                    save_path1 = "./save_graph/good/" + str(mon) + "_" + str(day) + "/v=" + str(V) + "q=" + str(Q) \
                                 + "n1=n2=" + str(N1) + "d=" + str(discount_factor) + "lr=" + str(lr) + "/"
                    save_name = save_path1 + "_" + str(t) + info1 + ".png"
                    file_dir = os.path.split(save_name)[0]
                    if not os.path.isdir(file_dir):
                        os.makedirs(file_dir)
                    pylab.savefig(save_name)
                """

                pylab.close()
                if N >= 50:
                    agent.model.save_weights(
                        "./save_model/REINFORCE_trained_v=" + str(V) + "q=" +
                        str(q) + "_" + str(N1) + "_" + str(N2) + ".h5")

            if best_makespan < heft_make_span and e == EPISODES - 1:
                """"save makespan data"""
                f_path = "makespan/v=" + str(V) + "n=" + str(t) + "/"
                f_name = f_path + "makespan.txt"
                file_dir1 = os.path.split(f_name)[0]
                if not os.path.isdir(file_dir1):
                    os.makedirs(file_dir1)
                with open(f_name, 'w') as fl:
                    fl.write(str(-heft_make_span) + "\n")
                    for i in scores:
                        fl.write(str(i) + "\n")

                slr_rein = round(1.0 * best_makespan / cp_min_costs, 4)
                speedup_rein = round(1.0 * min_costs / best_makespan, 4)
                running_time_rein = int(
                    round((rein_end_time - rein_start_time), 3) * 1000)
                file_path = "performance/test_v=" + str(V) + "/"
                filename = file_path + "slr_speedup_heft_cpop.txt"
                file_dir__ = os.path.split(filename)[0]
                if not os.path.isdir(file_dir__):
                    os.makedirs(file_dir__)
                with open(filename, 'a') as file_object:
                    fields = [V, Q, t, slr_rein, speedup_rein, running_time_rein]
                    file_object.write("  ".join(str(x) for x in fields) + "\n")

        t += 1