Example #1
def heft(wf_name, nodes):
    """
    Build a schedule with the HEFT algorithm and replay it in the context.

    :return: final reward and end time (makespan) of the schedule
    """
    rm = ExperimentResourceManager(rg.r(nodes))
    estimator = ModelTimeEstimator(bandwidth=10)
    _wf = wf(wf_name[0])
    heft_schedule = run_heft(_wf, rm, estimator)
    actions = [(proc.start_time, int(proc.job.global_id), node.name_id)
               for node in heft_schedule.mapping
               for proc in heft_schedule.mapping[node]]
    actions = sorted(actions, key=lambda x: x[0])
    actions = [(action[1], action[2]) for action in actions]

    test_wfs, test_times, test_scores, test_size = wf_setup(wf_name)
    ttree, tdata, trun_times = test_wfs[0]
    wfl = ctx.Context(len(_wf.get_all_unique_tasks()), nodes, trun_times,
                      ttree, tdata)
    reward = 0
    end_time = 0
    for task, node in actions:
        task_id = wfl.candidates.tolist().index(task)
        reward, end_time = wfl.make_action(task_id, node)

    draw_heft_schedule(heft_schedule.mapping, wfl.worst_time, len(actions),
                       'h', '1')
    return reward, end_time
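
# Illustrative usage sketch (an addition, not part of the original snippet):
# the workflow name and node capacities below are hypothetical placeholders.
if __name__ == '__main__':
    reward, makespan = heft(['Montage_25'], [4, 8, 8, 16])
    print('HEFT reward={}, makespan={}'.format(reward, makespan))
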
def episode(ei, config, test_wfs, test_size):
    global URL
    ttree, tdata, trun_times = test_wfs[ei % test_size]
    wfl = ctx.Context(config['agent_task'], config['nodes'], trun_times, ttree,
                      tdata)
    wfl.name = config['wfs_name'][ei % test_size]
    done = wfl.completed
    state = list(map(float, list(wfl.state)))
    sars_list = list()
    # safety cap: a complete schedule needs at most wfl.n actions
    for act_time in range(100):
        mask = list(map(int, list(wfl.get_mask())))
        action = requests.post(f'{URL}act',
                               json={
                                   'state': state,
                                   'mask': mask,
                                   'sched': False
                               }).json()['action']
        act_t, act_n = wfl.actions[action]
        reward, wf_time = wfl.make_action(act_t, act_n)
        next_state = list(map(float, list(wfl.state)))
        done = wfl.completed
        sars_list.append((state, action, reward, next_state, done))
        state = next_state
        if done:
            return reward, sars_list
    # reaching here means the wf was not fully scheduled within the cap
    raise Exception("attempt to provide action after wf is scheduled")
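
# Sketch of consuming the returned sars_list with a local replay buffer.
# The buffer below is an assumption for illustration; the original keeps
# experience on the remote agent behind the REST API.
import random
from collections import deque

replay_buffer = deque(maxlen=10000)

def store_and_sample(sars_list, batch_size=32):
    """Append one episode's transitions and draw a training batch."""
    replay_buffer.extend(sars_list)
    if len(replay_buffer) < batch_size:
        return []
    return random.sample(replay_buffer, batch_size)
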
def test_agent(args):
    global URL
    config = parameter_setup(args, DEFAULT_CONFIG)
    test_wfs, test_times, test_scores, test_size = wf_setup(config['wfs_name'])
    for i in range(test_size):
        best_saves = []
        for k in range(5):
            ttree, tdata, trun_times = test_wfs[i]
            wfl = ctx.Context(config['agent_task'], config['nodes'],
                              trun_times, ttree, tdata)
            wfl.name = config['wfs_name'][i]
            done = wfl.completed
            state = list(map(float, list(wfl.state)))
            for time in range(wfl.n):
                mask = list(map(int, list(wfl.get_mask())))
                action = requests.post(f'{URL}test',
                                       json={
                                           'state': state,
                                           'mask': mask,
                                           'sched': False
                                       }).json()['action']
                act_t, act_n = wfl.actions[action]
                reward, wf_time = wfl.make_action(act_t, act_n)
                next_state = list(map(float, list(wfl.state)))
                done = wfl.completed
                state = next_state
                if done:
                    test_scores[i].append(reward)
                    test_times[i].append(wf_time)
                    best_saves.append((wfl, wf_time))
                    break
        best_saves = sorted(best_saves, key=lambda x: x[1])
        write_schedule(i, best_saves[0][0])
def interective_test(model, args):
    """
    Interective Test

    :param model:
    :param args:
    :return:
    """
    config = parameter_setup(args, DEFAULT_CONFIG)
    test_wfs, test_times, test_scores, test_size = wf_setup(config['wfs_name'])
    for i in range(test_size):
        ttree, tdata, trun_times = test_wfs[i]
        wfl = ctx.Context(config['agent_task'], config['nodes'], trun_times,
                          ttree, tdata)
        sch = ScheduleInterectivePlotter(wfl.worst_time, wfl.m, wfl.n)
        wfl.name = config['wfs_name'][i]
        if config['actor_type'] == 'rnn':
            deq = RNNDeque(seq_size=config['seq_size'],
                           size=config['state_size'])
        done = wfl.completed
        state = wfl.state
        for time in range(wfl.n):
            mask = wfl.get_mask()
            q = model.act_q(state.reshape(1, state.shape[0]), mask, False)
            q = np.squeeze(q, axis=0) if len(q.shape) > 1 else q
            action_idx = np.argmax(q)
            actions = [wfl.actions[action] for action in range(q.shape[-1])]
            best_t, best_n = actions[action_idx]
            copies_of_wfl = [deepcopy(wfl) for _ in range(len(actions))]
            reward, wf_time = wfl.make_action(best_t, best_n)
            next_state = wfl.state

            acts = []
            for idx, action in enumerate(actions):
                wfl_copy = copies_of_wfl[idx]
                t, n = action
                if q[idx] != 0 or idx == action_idx:
                    reward, wf_time, item = wfl_copy.make_action_item(t, n)
                    acts.append((item, reward, n))
            sch.draw_item(wfl.schedule, acts)
            if config['actor_type'] == 'rnn':
                deq.push(next_state)
                next_state = deq.show()
            done = wfl.completed
            state = next_state
            if done:
                test_scores[i].append(reward)
                test_times[i].append(wf_time)
        write_schedule(args.run_name, i, wfl)
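
# The interactive test above takes argmax over the raw Q-values returned by
# model.act_q; masking invalid actions is presumably handled inside the
# model. A minimal sketch of what that masking plausibly does (the helper
# below is hypothetical, not part of the original code):
import numpy as np

def masked_argmax(q_values, mask):
    """Argmax restricted to actions the mask allows."""
    q = np.where(np.asarray(mask, dtype=bool), q_values, -np.inf)
    return int(np.argmax(q))
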
def episode(model, ei, config, test_wfs, test_size):
    """
    Run one learning episode for the NN-based algorithm.

    :param model:
    :param ei:
    :param config:
    :param test_wfs:
    :param test_size:
    :return: final reward and list of (state, action, reward, next_state, done)
    """
    ttree, tdata, trun_times = test_wfs[ei % test_size]
    wfl = ctx.Context(config['agent_task'], config['nodes'], trun_times, ttree,
                      tdata)
    wfl.name = config['wfs_name'][ei % test_size]
    if config['actor_type'] == 'rnn':
        deq = RNNDeque(seq_size=config['seq_size'], size=config['state_size'])
    done = wfl.completed
    state = wfl.state
    if config['actor_type'] == 'rnn':
        deq.push(state)
        state = deq.show()
    sars_list = list()
    reward = 0
    # safety cap: a complete schedule needs at most wfl.n actions
    for act_time in range(100):
        mask = wfl.get_mask()
        # action = requests.post(f"{URL}act", json={'state': state, 'mask': mask, 'sched': False}).json()['action']
        action = model.act(state.reshape(1, state.shape[0]), mask, False)
        act_t, act_n = wfl.actions[action]
        reward, wf_time = wfl.make_action(act_t, act_n)
        next_state = wfl.state
        if config['actor_type'] == 'rnn':
            deq.push(next_state)
            next_state = deq.show()
        done = wfl.completed
        sars_list.append((state, action, reward, next_state, done))
        state = next_state
        if done:
            return reward, sars_list
    return reward, sars_list
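
# RNNDeque is not defined in this snippet. A plausible minimal stand-in,
# assuming it keeps the last seq_size state vectors zero-padded into a
# (seq_size, size) array (an assumption, not the project's actual class):
import numpy as np
from collections import deque

class SlidingStateWindow:
    """Hypothetical RNNDeque-like window over recent state vectors."""

    def __init__(self, seq_size, size):
        self.deq = deque([np.zeros(size)] * seq_size, maxlen=seq_size)

    def push(self, state):
        self.deq.append(np.asarray(state, dtype=float))

    def show(self):
        return np.stack(self.deq)
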
Example #6
def episode(ei, config, test_wfs, test_size, URL):
    tree, data, run_times = test_wfs[ei % test_size]
    wfl = ctx.Context(config['agent_task'], config['nodes'], run_times, tree,
                      data)
    wfl.name = config['wfs_name'][ei % test_size]
    if config['actor_type'] == 'rnn':
        deq = RNNDeque(seq_size=config['seq_size'], size=config['state_size'])
    done = wfl.completed
    state = list(map(float, list(wfl.state)))
    if config['actor_type'] == 'rnn':
        deq.push(state)
        state = deq.show()
    sars_list = list()
    reward = 0
    # safety cap: a complete schedule needs at most wfl.n actions
    for act_time in range(100):
        mask = list(map(int, list(wfl.get_mask())))
        state = state.tolist() if not isinstance(state, list) else state
        action = requests.post(f"{URL}act",
                               json={
                                   'state': state,
                                   'mask': mask,
                                   'sched': False
                               }).json()['action']
        act_t, act_n = wfl.actions[action]
        reward, wf_time = wfl.make_action(act_t, act_n)
        next_state = list(map(float, list(wfl.state)))
        if config['actor_type'] == 'rnn':
            deq.push(next_state)
            next_state = deq.show()
            next_state = next_state.tolist()
        done = wfl.completed
        sars_list.append((state, action, reward, next_state, done))
        state = next_state

        if done:
            replay(config['batch_size'], URL)
            return reward, sars_list

    return reward, sars_list
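
# This example assumes an HTTP agent serving POST {URL}act with a JSON body
# {'state', 'mask', 'sched'} and replying {'action': <int>}. A minimal mock
# of such a service, for illustration only (the random Q-values stand in
# for the real model):
from flask import Flask, jsonify, request
import numpy as np

app = Flask(__name__)

@app.route('/act', methods=['POST'])
def act():
    payload = request.get_json()
    q = np.random.rand(len(payload['mask']))
    q[~np.asarray(payload['mask'], dtype=bool)] = -np.inf
    return jsonify({'action': int(np.argmax(q))})

# app.run(port=5000)  # then pass URL='http://localhost:5000/' to episode()
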
Example #7
def episode(ei):

    # generate a wf
    # random
    # tasks_n = np.random.randint(task_par_min, task_par+1)
    # tree, data, run_times = tree_data_gen(tasks_n)
    # test sample
    ttree, tdata, trun_times = test_wfs[ei % test_size]
    wfl = ctx.Context(agent_tasks, nodes, trun_times, ttree, tdata)
    wfl.name = wfs_names[ei % test_size]
    # real
    # wf_real = random.choice(wfs_real)
    # tree, data, run_times = tree_data_wf(wf_real)
    # real or random
    # wfl = ctx.Context(agent_tasks, nodes, run_times, tree, data)
    # wfl.name = "random"
    done = wfl.completed
    state = wfl.state
    state = np.reshape(state, [1, state_size])
    # ep_memory = []
    # import pprint
    # pprint.pprint(wfl.get_state_map())
    sars_list = list()
    # safety cap: a complete schedule needs at most wfl.n actions
    for act_time in range(100):
        mask = wfl.get_mask()
        action = agent.act(state, mask)
        act_t, act_n = wfl.actions[action]
        reward, wf_time = wfl.make_action(act_t, act_n)
        next_state = wfl.state
        done = wfl.completed
        next_state = np.reshape(next_state, [1, state_size])
        # remember the chain of actions
        #agent.remember((state, action, reward, next_state, done))
        sars_list.append((state, action, reward, next_state, done))
        # ep_memory.append((state, action, reward, next_state, done))
        state = next_state
        if done:
            return reward, sars_list
    # reaching here means the wf was not fully scheduled within the cap
    raise Exception("attempt to provide action after wf is scheduled")
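
# agent.act is not shown here; epsilon-greedy selection over masked
# Q-values is the usual DQN pattern. A sketch under that assumption
# (the function below is hypothetical):
import numpy as np

def epsilon_greedy_act(q_values, mask, epsilon=0.1):
    """Pick a random valid action with probability epsilon, else the best."""
    valid = np.flatnonzero(mask)
    if np.random.rand() < epsilon:
        return int(np.random.choice(valid))
    q = np.where(np.asarray(mask, dtype=bool), q_values, -np.inf)
    return int(np.argmax(q))
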
def test(model, args):
    """
    Create a schedule using the current NN without updating its parameters.

    :param model: agent exposing act(state, mask, sched)
    :param args: run arguments merged into DEFAULT_CONFIG
    :return:
    """
    config = parameter_setup(args, DEFAULT_CONFIG)
    test_wfs, test_times, test_scores, test_size = wf_setup(config['wfs_name'])
    for i in range(test_size):
        ttree, tdata, trun_times = test_wfs[i]
        wfl = ctx.Context(config['agent_task'], config['nodes'], trun_times,
                          ttree, tdata)
        wfl.name = config['wfs_name'][i]
        if config['actor_type'] == 'rnn':
            deq = RNNDeque(seq_size=config['seq_size'],
                           size=config['state_size'])
        done = wfl.completed
        state = wfl.state
        if config['actor_type'] == 'rnn':
            deq.push(state)
            state = deq.show()
        for time in range(wfl.n):
            mask = wfl.get_mask()
            action = model.act(state.reshape(1, state.shape[0]), mask, False)
            act_t, act_n = wfl.actions[action]
            reward, wf_time = wfl.make_action(act_t, act_n)
            next_state = wfl.state
            if config['actor_type'] == 'rnn':
                deq.push(next_state)
                next_state = deq.show()
            done = wfl.completed
            state = next_state
            if done:
                test_scores[i].append(reward)
                test_times[i].append(wf_time)
        write_schedule(args.run_name, i, wfl)
Example #9
                    .format(e, EPISODES, reward, mean_scores,
                            last_mean_scores))

        # train the neural network
        if e % 10 == 0:
            if len(agent.D) > batch_size:
                loss += agent.replay(batch_size, e)
        if e % 50000 == 0:
            # test wfs
            eps = agent.epsilon
            agent.epsilon = 0.0
            for i in range(test_size):
                best_saves = list()
                for k in range(5):
                    ttree, tdata, trun_times = test_wfs[i]
                    wfl = ctx.Context(agent_tasks, nodes, trun_times, ttree,
                                      tdata)
                    wfl.name = wfs_names[i]
                    done = wfl.completed
                    state = wfl.state
                    state = np.reshape(state, [1, state_size])
                    for time in range(wfl.n):
                        mask = wfl.get_mask()
                        action = agent.act(state, mask)
                        act_t, act_n = wfl.actions[action]
                        reward, wf_time = wfl.make_action(act_t, act_n)
                        next_state = wfl.state
                        done = wfl.completed
                        next_state = np.reshape(next_state, [1, state_size])
                        state = next_state
                        if done:
                            test_scores[i].append(reward)
Example #10
    state_size = 87
    action_size = task_par * proc_par
    agent = actor.DQNAgent(state_size, action_size)
    # agent.load("model_10000.h5")
    # agent.load("model_10000.h5")
    agent.load("model_50000.h5")
    # agent.load("model_0.h5")

    nodes = np.array([4, 8])

    wf_name = "floodplain"
    wf_path = "..\\resources\\{0}.xml".format(wf_name)
    wfl = read_workflow(wf_path, wf_name)

    tree, data, run_times = tree_data_wf(wfl)
    wfl = ctx.Context(nodes, run_times, tree, data)

    done = wfl.completed
    state = wfl.state
    state = np.reshape(state, [1, state_size])
    for i in range(wfl.n):
        mask = wfl.get_mask()
        action = agent.act(state, mask, True)
        act_t, act_n = wfl.actions[action]
        reward, wf_time = wfl.make_action(act_t, act_n)
        next_state = wfl.state
        done = wfl.completed
        next_state = np.reshape(next_state, [1, state_size])
        if done:
            print("Completed")
            wfl.wf_name = wf_name