# Repo-local modules referenced throughout these snippets (names inferred
# from the call sites below).
import agent
import aws_s3_utility
import experiment
import file_utils
import logger
import mdps
import policy
import qnetwork
import recurrent_qnetwork
import replay_memory
import state_adapters


def test_sequence_value_string(self):
    room_size = 3
    num_rooms = 3
    mdp = mdps.MazeMDP(room_size, num_rooms)
    mdp.compute_states()
    mdp.EXIT_REWARD = 1
    mdp.MOVE_REWARD = -0.1
    discount = 1
    sequence_length = 2
    learning_rate = 1e-3
    freeze_interval = 10000
    num_hidden = 4
    eps = .5
    reg = 1e-8
    num_actions = len(mdp.get_actions(None))
    batch_size = 100
    network = recurrent_qnetwork.RecurrentQNetwork(
        input_shape=2 * room_size, sequence_length=sequence_length,
        batch_size=batch_size, num_actions=4, num_hidden=num_hidden,
        discount=discount, learning_rate=learning_rate, regularization=reg,
        update_rule='adam', freeze_interval=freeze_interval,
        network_type='single_layer_lstm', rng=None)
    num_epochs = 5
    epoch_length = 10
    test_epoch_length = 0
    max_steps = (room_size * num_rooms) ** 2
    epsilon_decay = (num_epochs * epoch_length * max_steps) / 2
    adapter = state_adapters.CoordinatesToSingleRoomRowColAdapter(
        room_size=room_size)
    p = policy.EpsilonGreedy(num_actions, eps, 0.05, epsilon_decay)
    rm = replay_memory.SequenceReplayMemory(
        input_shape=2 * room_size, sequence_length=sequence_length,
        batch_size=batch_size, capacity=50000)
    log = logger.NeuralLogger(agent_name='RecurrentQNetwork')
    a = agent.RecurrentNeuralAgent(network=network, policy=p,
        replay_memory=rm, log=log, state_adapter=adapter)
    run_tests = False
    e = experiment.Experiment(mdp, a, num_epochs, epoch_length,
        test_epoch_length, max_steps, run_tests, value_logging=True)
    e.log_temporal_value_string()
def test_agent(self):
    room_size = 5
    mdp = mdps.MazeMDP(room_size, 1)
    mdp.compute_states()
    mdp.EXIT_REWARD = 1
    mdp.MOVE_REWARD = -0.1
    discount = mdp.get_discount()
    num_actions = len(mdp.get_actions(None))
    network = qnetwork.QNetwork(input_shape=2 * room_size, batch_size=1,
        num_actions=4, num_hidden=10, discount=discount, learning_rate=1e-3,
        update_rule='sgd', freeze_interval=10000, rng=None)
    p = policy.EpsilonGreedy(num_actions, 0.5, 0.05, 10000)
    rm = replay_memory.ReplayMemory(1)
    log = logger.NeuralLogger(agent_name='QNetwork')
    adapter = state_adapters.CoordinatesToSingleRoomRowColAdapter(
        room_size=room_size)
    a = agent.NeuralAgent(network=network, policy=p, replay_memory=rm,
        log=log, state_adapter=adapter)
    num_epochs = 2
    epoch_length = 10
    test_epoch_length = 0
    max_steps = 10
    run_tests = False
    e = experiment.Experiment(mdp, a, num_epochs, epoch_length,
        test_epoch_length, max_steps, run_tests, value_logging=False)
    e.run()
def run(learning_rate, freeze_interval, num_hidden, reg, seq_len, eps, nt, update):
    room_size = 5
    num_rooms = 2
    input_shape = 2 * room_size
    print 'building mdp...'
    mdp = mdps.MazeMDP(room_size, num_rooms)
    mdp.compute_states()
    mdp.EXIT_REWARD = 1
    mdp.MOVE_REWARD = -0.01
    network_type = nt
    discount = 1
    sequence_length = seq_len
    num_actions = len(mdp.get_actions(None))
    batch_size = 100
    update_rule = update
    print 'building network...'
    network = recurrent_qnetwork.RecurrentQNetwork(
        input_shape=input_shape, sequence_length=sequence_length,
        batch_size=batch_size, num_actions=4, num_hidden=num_hidden,
        discount=discount, learning_rate=learning_rate, regularization=reg,
        update_rule=update_rule, freeze_interval=freeze_interval,
        network_type=network_type, rng=None)

    # Take this many steps because (very loosely): let l be the step length,
    # d the distance between the start and goal locations, and N the number
    # of steps for a random walk to travel distance d. Then N ~ (d/l)^2,
    # so with l = 1 the agent needs roughly d^2 steps to end up d away.
    # The target distance here is walking along both dimensions of the maze,
    # i.e. 2 * room_size * num_rooms, so squaring that gives a loose
    # approximation to the number of steps needed (ignoring that this is
    # actually a lattice, and ignoring the walls).
    # See: http://mathworld.wolfram.com/RandomWalk2-Dimensional.html
    max_steps = (2 * room_size * num_rooms) ** 2

    num_epochs = 500
    epoch_length = 1
    test_epoch_length = 0
    epsilon_decay = (num_epochs * epoch_length * max_steps) / 4
    print 'building adapter...'
    adapter = state_adapters.CoordinatesToSingleRoomRowColAdapter(
        room_size=room_size)
    print 'building policy...'
    p = policy.EpsilonGreedy(num_actions, eps, 0.05, epsilon_decay)
    print 'building replay memory...'
    # want to track at minimum the last 50 episodes
    capacity = max_steps * 50
    rm = replay_memory.SequenceReplayMemory(
        input_shape=input_shape, sequence_length=sequence_length,
        batch_size=batch_size, capacity=capacity)
    print 'building logger...'
    log = logger.NeuralLogger(agent_name=network_type)
    print 'building agent...'
    a = agent.RecurrentNeuralAgent(network=network, policy=p,
        replay_memory=rm, log=log, state_adapter=adapter)
    run_tests = False
    print 'building experiment...'
    e = experiment.Experiment(mdp, a, num_epochs, epoch_length,
        test_epoch_length, max_steps, run_tests, value_logging=True)
    print 'running experiment...'
    e.run()

    ak = file_utils.load_key('../access_key.key')
    sk = file_utils.load_key('../secret_key.key')
    bucket = 'hierarchical9'
    try:
        aws_util = aws_s3_utility.S3Utility(ak, sk, bucket)
        aws_util.upload_directory(e.agent.logger.log_dir)
    except Exception as ex:
        print 'error uploading to s3: {}'.format(ex)
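# A self-contained sketch (not part of the repo; mean_steps_to_distance is a
# hypothetical helper) that empirically checks the N ~ (d/l)^2 random-walk
# heuristic used to set max_steps above. It estimates the mean number of
# unit steps a simple 2D lattice walk needs before its distance from the
# origin first reaches d, ignoring walls and maze structure just as the
# comment does.
import random

def mean_steps_to_distance(d, num_trials=200, max_iters=10 ** 6):
    total = 0
    for _ in range(num_trials):
        x = y = steps = 0
        # walk until the squared distance from the origin reaches d^2
        # (with a safety cap on iterations)
        while x * x + y * y < d * d and steps < max_iters:
            dx, dy = random.choice([(1, 0), (-1, 0), (0, 1), (0, -1)])
            x += dx
            y += dy
            steps += 1
        total += steps
    return total / float(num_trials)

# With room_size = 5 and num_rooms = 2, the target distance is
# d = 2 * room_size * num_rooms = 20, so the heuristic predicts about
# d^2 = 400 steps; the empirical mean comes out the same order of magnitude.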
def run(learning_rate, freeze_interval, num_hidden, reg):
    room_size = 5
    num_rooms = 2
    mdp = mdps.MazeMDP(room_size, num_rooms)
    mdp.compute_states()
    mdp.EXIT_REWARD = 1
    mdp.MOVE_REWARD = -0.01
    discount = 1
    num_actions = len(mdp.get_actions(None))
    batch_size = 100
    print 'building network...'
    network = qnetwork.QNetwork(input_shape=2 * room_size + num_rooms ** 2,
        batch_size=batch_size, num_hidden_layers=2, num_actions=4,
        num_hidden=num_hidden, discount=discount, learning_rate=learning_rate,
        regularization=reg, update_rule='adam',
        freeze_interval=freeze_interval, rng=None)
    num_epochs = 50
    epoch_length = 2
    test_epoch_length = 0
    max_steps = 4 * (room_size * num_rooms) ** 2
    epsilon_decay = (num_epochs * epoch_length * max_steps) / 1.5
    print 'building policy...'
    p = policy.EpsilonGreedy(num_actions, 0.5, 0.05, epsilon_decay)
    print 'building memory...'
    rm = replay_memory.ReplayMemory(batch_size, capacity=50000)
    print 'building logger...'
    log = logger.NeuralLogger(agent_name='QNetwork')
    print 'building state adapter...'
    adapter = state_adapters.CoordinatesToRowColRoomAdapter(
        room_size=room_size, num_rooms=num_rooms)
    # alternative adapters:
    # adapter = state_adapters.CoordinatesToRowColAdapter(room_size=room_size, num_rooms=num_rooms)
    # adapter = state_adapters.CoordinatesToFlattenedGridAdapter(room_size=room_size, num_rooms=num_rooms)
    # adapter = state_adapters.IdentityAdapter(room_size=room_size, num_rooms=num_rooms)
    # adapter = state_adapters.CoordinatesToSingleRoomRowColAdapter(room_size=room_size)
    print 'building agent...'
    a = agent.NeuralAgent(network=network, policy=p, replay_memory=rm,
        log=log, state_adapter=adapter)
    run_tests = False
    e = experiment.Experiment(mdp, a, num_epochs, epoch_length,
        test_epoch_length, max_steps, run_tests, value_logging=True)
    e.run()

    ak = file_utils.load_key('../access_key.key')
    sk = file_utils.load_key('../secret_key.key')
    bucket = 'hierarchical'
    try:
        aws_util = aws_s3_utility.S3Utility(ak, sk, bucket)
        aws_util.upload_directory(e.agent.logger.log_dir)
    except Exception as ex:
        print 'error uploading to s3: {}'.format(ex)
def test_reduction_decreases_exploration_prob_completely(self):
    p = policy.EpsilonGreedy(num_actions=4, exploration_prob=1,
        min_exploration_prob=0, actions_until_min=2)
    q_values = [1, 2, 3, 4]
    p.choose_action(q_values)
    p.choose_action(q_values)
    self.assertEqual(p.exploration_prob, 0)
def test_deterministic_action_selection(self):
    p = policy.EpsilonGreedy(num_actions=4, exploration_prob=0,
        min_exploration_prob=0, actions_until_min=1)
    q_values = [1, 2, 3, 4]
    actual = p.choose_action(q_values)
    expected = 3  # index of the max q-value
    self.assertEqual(actual, expected)
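# A minimal sketch, assuming a linear annealing schedule, of the
# policy.EpsilonGreedy contract the two tests above exercise (the class name
# EpsilonGreedySketch and the decrement logic are illustrative, not the
# repo's implementation): choose_action returns argmax(q_values) with
# probability 1 - exploration_prob and a uniformly random action otherwise,
# and each call moves exploration_prob toward min_exploration_prob so the
# minimum is reached after actions_until_min calls.
import random

class EpsilonGreedySketch(object):
    def __init__(self, num_actions, exploration_prob, min_exploration_prob,
                 actions_until_min):
        self.num_actions = num_actions
        self.exploration_prob = exploration_prob
        self.min_exploration_prob = min_exploration_prob
        # per-call decrement that hits the minimum after actions_until_min calls
        self.decrement = ((exploration_prob - min_exploration_prob)
                          / float(actions_until_min))

    def choose_action(self, q_values):
        if random.random() < self.exploration_prob:
            action = random.randrange(self.num_actions)
        else:
            action = max(range(self.num_actions), key=lambda i: q_values[i])
        # anneal epsilon, clamping at the floor
        self.exploration_prob = max(self.min_exploration_prob,
                                    self.exploration_prob - self.decrement)
        return action

# Under this sketch both tests pass: with exploration_prob=1 and
# actions_until_min=2, two calls drive exploration_prob to 0; with
# exploration_prob=0, choose_action deterministically returns index 3 for
# q_values = [1, 2, 3, 4].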