/
run_fixed.py
executable file
·58 lines (49 loc) · 1.27 KB
/
run_fixed.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
#! /usr/bin/env python
"""
Execute a training run of deep-Q-learning with parameters that
are consistent with:
Playing Atari with Deep Reinforcement Learning
NIPS Deep Learning Workshop 2013
"""
import launcher
import sys
class Defaults:
    """Default hyper-parameters handed to ``launcher.launch`` for this run.

    Values are read as class attributes by the launcher; nothing here is
    instantiated.  The settings aim to match the NIPS 2013 DQN workshop
    paper (see module docstring), adapted for the SUMO environment.
    """
    # ----------------------
    # RLGlue Parameters
    # ----------------------
    # TCP port the RL-Glue server/agent pair communicates on.
    RLGLUE_PORT: int = 4096
    # ----------------------
    # Experiment Parameters
    # ----------------------
    # NOTE(review): 0 steps per epoch looks like a sentinel (epoch length
    # presumably driven elsewhere, e.g. by the environment/launcher) —
    # TODO confirm against launcher.launch.
    STEPS_PER_EPOCH: int = 0
    EPOCHS: int = 100
    # Number of steps in each evaluation (testing) phase.
    STEPS_PER_TEST: int = 10000
    # ----------------------
    # SUMO Parameters
    # ----------------------
    BASE_ROM_PATH: str = "../roms/"
    ROM: str = 'SUMO_FIXED'
    # Number of frames each chosen action is repeated for.
    FRAME_SKIP: int = 4
    # ----------------------
    # Agent/Network parameters:
    # ----------------------
    UPDATE_RULE: str = 'rmsprop'
    # How gradients across the minibatch are combined ('mean' vs 'sum').
    BATCH_ACCUMULATOR: str = 'mean'
    LEARNING_RATE: float = .0002
    # Reward discount factor (gamma).
    DISCOUNT: float = .95
    RMS_DECAY: float = .99  # (Rho)
    RMS_EPSILON: float = 1e-6
    MOMENTUM: int = 0
    # Epsilon-greedy exploration: anneal from EPSILON_START to EPSILON_MIN
    # over EPSILON_DECAY steps.
    EPSILON_START: float = 1.0
    EPSILON_MIN: float = .1
    EPSILON_DECAY: int = 1000000
    # Number of recent frames stacked into one network input (history length).
    PHI_LENGTH: int = 1
    # Train after every UPDATE_FREQUENCY environment steps.
    UPDATE_FREQUENCY: int = 1
    REPLAY_MEMORY_SIZE: int = 1000000
    BATCH_SIZE: int = 32
    NETWORK_TYPE: str = "nips_cuda"
    # NOTE(review): -1 presumably disables the frozen target network
    # (consistent with the NIPS-2013 setup, which had none) — TODO confirm
    # how the launcher/agent interprets it.
    FREEZE_INTERVAL: int = -1
    # Minimum replay-memory fill before learning updates begin.
    REPLAY_START_SIZE: int = 100
    # How raw frames are resized to the network input ('crop' vs scale).
    IMAGE_RESIZE: str = 'crop'
def _main() -> None:
    """Hand command-line arguments and these defaults to the launcher."""
    launcher.launch(sys.argv[1:], Defaults, __doc__, 'fixed')


if __name__ == "__main__":
    _main()