Code Example #1
  def test_failure_window(self, mock_time):
    config = AgentConfig("", "")
    original_config = config.get(AgentConfig.COMMAND_SECTION, AgentConfig.AUTO_RESTART)
    config.set(AgentConfig.COMMAND_SECTION, AgentConfig.AUTO_RESTART, '2,1')
    ## The behavior of side_effect differs between command-line and IDE test runs,
    ## so the list contains a few extra items.
    mock_time.side_effect = [200, 500, 500]
    controller5 = Controller.Controller(config)

    try:
      self.assertTrue(controller5.shouldAutoRestart())
      self.assertTrue(controller5.shouldAutoRestart())
    finally:
      config.set(AgentConfig.COMMAND_SECTION, AgentConfig.AUTO_RESTART, original_config)
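A quick aside on the mock_time fixture above: when unittest.mock's side_effect is a list, each call to the mock returns the next element, which is how this test scripts successive time.time() readings. A standalone demonstration (illustrative only, not project code):

from unittest.mock import MagicMock

mock_time = MagicMock(side_effect=[200, 500, 500])
print(mock_time())  # -> 200
print(mock_time())  # -> 500
print(mock_time())  # -> 500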
Code Example #2
File: TestController.py Project: xgong/slider
    def setUp(self, hostname_method, NetUtil_mock, lockMock, threadMock):

        Controller.logger = MagicMock()
        lockMock.return_value = MagicMock()
        NetUtil_mock.return_value = MagicMock()
        hostname_method.return_value = "test_hostname"

        config = MagicMock()
        config.get.return_value = "something"
        config.getResolvedPath.return_value = "something"

        self.controller = Controller.Controller(config)
        self.controller.netutil.MINIMUM_INTERVAL_BETWEEN_HEARTBEATS = 0.1
        self.controller.netutil.HEARTBEAT_NOT_IDDLE_INTERVAL_SEC = 0.1
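setUp here takes four mock arguments, which implies a stack of @patch decorators that the snippet does not show; mocks are injected bottom-up, so the decorator nearest the function supplies the first parameter (hostname_method). A hypothetical reconstruction, with patch targets assumed rather than taken from the project:

from unittest.mock import patch

@patch('threading.Thread')          # -> threadMock (assumed target)
@patch('threading.Lock')            # -> lockMock (assumed target)
@patch('agent.NetUtil.NetUtil')     # -> NetUtil_mock (assumed target)
@patch('agent.hostname.hostname')   # -> hostname_method (assumed target)
def setUp(self, hostname_method, NetUtil_mock, lockMock, threadMock):
    ...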
Code Example #3
  def test_failure_window2(self):
    config = MagicMock()
    config.getErrorWindow.return_value = (0, 0)
    controller = Controller.Controller(config)

    self.assertTrue(controller.shouldAutoRestart())

    config.getErrorWindow.return_value = (0, 1)
    self.assertTrue(controller.shouldAutoRestart())

    config.getErrorWindow.return_value = (1, 0)
    self.assertTrue(controller.shouldAutoRestart())

    config.getErrorWindow.return_value = (-1, -1)
    self.assertTrue(controller.shouldAutoRestart())

    config.getErrorWindow.return_value = (1, 1)
    self.assertTrue(controller.shouldAutoRestart())

    # second failure within a minute
    self.assertFalse(controller.shouldAutoRestart())

    # do not reset the count unless the window expires
    self.assertFalse(controller.shouldAutoRestart())
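Between them, the two failure-window tests pin down the contract: getErrorWindow() returns a pair (assumed here to be (max_failures, window_minutes); the assertions cannot distinguish the order), a non-positive value in either position disables the check entirely, and the failure count resets once the window expires. A minimal sketch of logic consistent with those assertions; the attribute names are assumptions, not the real Controller internals:

import time

class RestartWindow:
  def __init__(self, config):
    self.config = config
    self.restartCount = 0
    self.firstFailureTime = 0

  def shouldAutoRestart(self):
    max_failures, window_minutes = self.config.getErrorWindow()
    if max_failures <= 0 or window_minutes <= 0:
      return True  # check disabled: always allow a restart
    now = time.time()
    # reset the counter once the window has expired
    if self.firstFailureTime > 0 and now - self.firstFailureTime >= window_minutes * 60:
      self.restartCount = 0
    if self.restartCount == 0:
      self.firstFailureTime = now
    self.restartCount += 1
    return self.restartCount <= max_failures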
Code Example #4
from agent import Controller

# Load controller
# E10000: (S?/R0/A?0, S1/R1/A80, S2/R2/A90, S1/R3/A90)
# E5000:  (S4/R0/A80, S4/R1/A70, S4/R2/A60, S4/R3/A10)

SEED = 4
REWARD_TYPE = 2
controller = Controller(rand_seed=SEED, rew_type=REWARD_TYPE)

# Training
NUM_EPISODES = 10000
MAX_TIME_STEPS = 150
MODEL_SAVE_NAME = 'reacher'

# controller.train(num_episodes = NUM_EPISODES, max_timesteps = MAX_TIME_STEPS, model_name = MODEL_SAVE_NAME)

# Testing
NUM_TESTS = 10
MODEL_LOAD_NAME = 'reacher_' + str(NUM_EPISODES) + '_' + str(REWARD_TYPE)

controller.test(num_test=NUM_TESTS,
                max_timesteps=MAX_TIME_STEPS,
                model_name=MODEL_LOAD_NAME)
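Note the checkpoint naming this script relies on: MODEL_LOAD_NAME evaluates to 'reacher_10000_2', i.e. the save name with the episode count and reward type appended. The implication (an inference from the code, not documented here) is that the commented-out controller.train(...) call was run once with the same SEED/REWARD_TYPE settings to produce the model that controller.test(...) loads.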
Code Example #5
else:
	device_id = int(args.device_id)

from Environment import Environment
env = Environment(task=task, seed=seed)
num_actions = env.env.action_space.n

# create experience memory
from memory import ExperienceMemory
experience_memory = ExperienceMemory(size=batch_size)

# create agent 
from agent import Controller
controller = Controller(experience_memory=experience_memory,
                        num_actions=num_actions,
                        batch_size=batch_size,
                        seed=seed,
                        use_multiple_gpu=use_multiple_gpu,
                        device_id=device_id)

# create quasi-Newton optimizer
from quasi_newton import QUASI_NEWTON
qn = QUASI_NEWTON(controller=controller,
                  m=m,
                  search_method=search_method,
                  quasi_newton_matrix=quasi_newton_matrix,
                  seed=seed)

# create the trainer
from trainer import Trainer
atari_trainer = Trainer(env=env,
                        controller=controller,
Code Example #6
lr = float(args.lr)
optim = args.optim
max_iter = int(args.max_iter)

from Environment import Environment
env = Environment(task=task)
num_actions = env.env.action_space.n
print('number of actions: ', num_actions)

# create experience memory
from memory import ExperienceMemory
experience_memory = ExperienceMemory()

# create agent
from agent import Controller
controller = Controller(experience_memory=experience_memory,
                        num_actions=num_actions,
                        lr=lr,
                        batch_size=batch_size,
                        optim_method=optim,
                        use_multiple_gpu=False)

# create the trainer
from trainer import Trainer
atari_trainer = Trainer(env=env,
                        controller=controller,
                        experience_memory=experience_memory,
                        max_iter=max_iter)

# run the training loop
atari_trainer.train()
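Both of the last two scripts read their hyperparameters from an args object the snippets do not show. A hypothetical argparse preamble supplying the fields they reference (flag names and defaults are assumptions; note the scripts cast the values themselves with float(...)/int(...)):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--lr', default='0.001')          # learning rate; script applies float(args.lr)
parser.add_argument('--optim', default='adam')        # optimizer name, passed through as a string
parser.add_argument('--max_iter', default='1000000')  # iteration budget; script applies int(args.max_iter)
parser.add_argument('--device_id', default='0')       # GPU index; script applies int(args.device_id)
args = parser.parse_args()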