Example #1
# NOTE: the top of this snippet is truncated; the imports and parameter values
# below are reconstructed by analogy with the other examples and are assumed,
# not taken from the original file.
import sys
sys.path.append(TIRAMOLA_DIR)

from Configuration import ModelConf
from MDPDTModel import MDPDTModel
from ComplexScenario import ComplexScenario  # module path assumed
from Constants import *
import random

training_steps = 5000        # assumed, mirrors Example #4
eval_steps     = 2000        # assumed, mirrors Example #4
num_tests      = 10          # assumed number of repetitions
load_period    = 250         # assumed scenario load period
epsilon        = 0.5         # assumed exploration rate, mirrors Example #4
MIN_VMS        = 1           # assumed cluster-size bounds
MAX_VMS        = 20
split_crit     = MAX_POINT   # assumed splitting criterion (constant used in Example #4)
cons_trans     = True        # assumed value for the second set_splitting argument
CONF_FILE      = TIRAMOLA_DIR + "examples/dt_split_test/dt.json"  # assumed path
split_step     = training_steps

PRINT = len(sys.argv) > 1 and sys.argv[1] == "-p"
if PRINT: num_tests = 1

conf = ModelConf(CONF_FILE)
assert conf.get_model_type() == MDP_DT, "Wrong model type in MDP-DT example"

total_reward_results = []
total_splits_results = []
good_splits_results  = []

for i in range(num_tests):

    scenario = ComplexScenario(training_steps, load_period, 10, MIN_VMS, MAX_VMS)
    model = MDPDTModel(conf.get_model_conf())
    model.set_state(scenario.get_current_measurements())
    model.set_allow_splitting(False)
    model.set_splitting(split_crit, cons_trans)

    total_reward = 0
    for time in range(training_steps + eval_steps):
    
        # epsilon-greedy: explore randomly during training, exploit afterwards
        if random.uniform(0, 1) < epsilon and time < training_steps:
            action = random.choice(model.get_legal_actions())
        else:
            action = model.suggest_action()

        reward = scenario.execute_action(action)
        meas   = scenario.get_current_measurements()
        model.update(action, meas, reward)
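The snippet ends mid-loop: splitting is never re-enabled and the three result lists declared at the top are never filled. The continuation below is a sketch of the likely bookkeeping, assuming splitting is switched back on at split_step and that only evaluation-phase reward is counted; the accessors for the model's split counts are not shown in the snippet, so that part is left as a comment.

        # once training ends, let the decision tree start splitting again
        # (assumed intent of split_step and the set_allow_splitting(False) call)
        if time == split_step:
            model.set_allow_splitting(True)

        # count only reward earned during the evaluation phase (assumed)
        if time >= training_steps:
            total_reward += reward

    # record the outcome of this repetition
    total_reward_results.append(total_reward)
    # total_splits_results / good_splits_results would be appended here from the
    # model's split statistics; the accessor is not shown in this snippet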
Example #2
import sys
sys.path.append(TIRAMOLA_DIR)

from Configuration import ModelConf
from MDPDTModel import MDPDTModel
from Constants import *
import random
import math
from pprint import pprint

CONFIGURATION_FILE = TIRAMOLA_DIR + "examples/dt_split_test/dt.json"

conf = ModelConf(CONFIGURATION_FILE)
assert conf.get_model_type() == MDP_DT, "Wrong model type in MDP-DT example"
model = MDPDTModel(conf.get_model_conf())

def meas(vms, load):
    return { NUMBER_OF_VMS: vms, TOTAL_LOAD: load }

add = (ADD_VMS, 1)
nop = (NO_OP, 0)
rem = (REMOVE_VMS, 1)

model.set_state(meas(1, 1))
model.update(nop, meas(2, 2), 5)
model.update(add, meas(3, 2), 3)

print "Before:"
model.print_state_details()
model.states[1].split(TOTAL_LOAD, [1.5])
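The example prints the model only before the manual split. A natural follow-up, using only calls that already appear on this page, is to print the state details again and compare the two listings:

print("After:")
model.print_state_details()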
Example #3
# NOTE: the top of this snippet is truncated; the imports and configuration path
# below are reconstructed from Example #2 and are assumed, not original.
import sys
sys.path.append(TIRAMOLA_DIR)

from Configuration import ModelConf
from MDPDTModel import MDPDTModel
from Constants import *

CONFIGURATION_FILE = TIRAMOLA_DIR + "examples/dt_split_test/dt.json"  # assumed path


def meas(vms, load):
    return {NUMBER_OF_VMS: vms, TOTAL_LOAD: load}


def reward(vms, load):
    # each VM serves up to 10 units of load and costs 3
    return min(10 * vms, load) - 3 * vms


def update(action, vms, load):
    # convenience wrapper: compute the reward and feed the transition to the model
    r = reward(vms, load)
    m = meas(vms, load)
    model.update(action, m, r, True)


conf = ModelConf(CONFIGURATION_FILE)
assert conf.get_model_type() == MDP_DT, "Wrong model type in MDP-DT example"
model = MDPDTModel(conf.get_model_conf())
model.set_single_point_splitting()
model.set_allow_splitting(False)

add = (ADD_VMS, 1)
nop = (NO_OP, 0)
rem = (REMOVE_VMS, 1)

model.set_state(meas(1, 10))
update(add, 2, 10)
update(rem, 1, 10)
update(add, 2, 10)
update(rem, 1, 10)
update(add, 2, 12)
update(rem, 1, 12)
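Splitting is disabled throughout the snippet, so the transitions above only accumulate in the model. A plausible continuation, assuming the point of set_single_point_splitting() is to observe a split once it is allowed again (all calls below already appear elsewhere on this page):

# re-enable splitting, feed one more transition, and inspect the result
model.set_allow_splitting(True)
update(add, 2, 12)
model.print_state_details()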
Example #4
# NOTE: the top of this snippet is truncated; the imports below are reconstructed
# from Example #2 (the ReadLoadScenario module path is assumed).
import sys
sys.path.append(TIRAMOLA_DIR)

from Configuration import ModelConf
from MDPDTModel import MDPDTModel
from ReadLoadScenario import ReadLoadScenario  # module path assumed
from Constants import *
import random
import math
from pprint import pprint

training_steps = 5000
eval_steps     = 2000
split_step     = 1
max_steps      = training_steps + eval_steps
epsilon        = 0.5
splitting      = MAX_POINT
CONF_FILE      = TIRAMOLA_DIR + "examples/dt_read_load/dt.json"

scenario = ReadLoadScenario(training_steps)
conf = ModelConf(CONF_FILE)
assert conf.get_model_type() == MDP_DT, "Wrong model type in MDP-DT example"

model = MDPDTModel(conf.get_model_conf())
model.set_state(scenario.get_current_measurements())
model.set_splitting(splitting)
model.set_allow_splitting(False)

total_reward = 0
for time in range(max_steps):

    # epsilon-greedy: explore randomly during training, exploit afterwards
    if random.uniform(0, 1) < epsilon and time < training_steps:
        action = random.choice(model.get_legal_actions())
    else:
        action = model.suggest_action()

    reward = scenario.execute_action(action)
    meas   = scenario.get_current_measurements()
    model.update(action, meas, reward)
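As in Example #1, total_reward is initialized but the snippet cuts off before accumulating or reporting it. A sketch of the likely bookkeeping, assuming only evaluation-phase reward is counted:

    # count only reward earned during the evaluation phase (assumed)
    if time >= training_steps:
        total_reward += reward

print("Average evaluation reward: %.3f" % (float(total_reward) / eval_steps))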
Example #5
# NOTE: the top of this snippet is truncated; the imports and configuration path
# below are reconstructed from Example #2 and are assumed, not original.
import sys
sys.path.append(TIRAMOLA_DIR)

from Configuration import ModelConf
from MDPDTModel import MDPDTModel
from Constants import *

CONFIGURATION_FILE = TIRAMOLA_DIR + "examples/dt_split_test/dt.json"  # assumed path


def meas(vms, load):
    return {NUMBER_OF_VMS: vms, TOTAL_LOAD: load}


def reward(vms, load):
    # each VM serves up to 10 units of load and costs 3
    return min(10 * vms, load) - 3 * vms


def update(action, vms, load):
    # convenience wrapper: compute the reward and feed the transition to the model
    r = reward(vms, load)
    m = meas(vms, load)
    model.update(action, m, r, True)


conf = ModelConf(CONFIGURATION_FILE)
assert conf.get_model_type() == MDP_DT, "Wrong model type in MDP-DT example"
model = MDPDTModel(conf.get_model_conf())

add = (ADD_VMS, 1)
nop = (NO_OP, 0)
rem = (REMOVE_VMS, 1)

model.set_state(meas(1, 10))
update(add, 2, 10)
update(rem, 1, 10)
update(add, 2, 10)
update(rem, 1, 10)
update(add, 2, 10)
update(rem, 1, 10)

update(add, 2, 20)
update(rem, 1, 20)
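The snippet ends right after the load shifts from 10 to 20, without showing what the model learned. A minimal inspection step, using only calls already shown on this page:

# inspect the resulting decision-tree states and the action the model now prefers
model.print_state_details()
print(model.suggest_action())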