# Example #1
# Replay buffer: up to 50k transitions; each observation is a stack of
# `window` consecutive frames.
memory = SequentialMemory(limit=50000, window_length=window)

# Epsilon-greedy exploration, annealed linearly from eps=1.0 down to 0.1
# over the first 5000 steps (eps=0.05 during test episodes).
policy = LinearAnnealedPolicy(EpsGreedyQPolicy(),
                              attr='eps',
                              value_max=1.,
                              value_min=.1,
                              value_test=.05,
                              nb_steps=5000)

# BUG FIX: enable_dueling_network must be given to the constructor —
# keras-rl rebuilds the model with the dueling head inside
# DQNAgent.__init__, so assigning `dqn.enable_dueling_network = True`
# after construction (as the original code did) is a silent no-op and
# the agent would train as a plain DQN.
dqn = DQNAgent(model=model,
               nb_actions=num_actions,
               memory=memory,
               nb_steps_warmup=window * 3,
               enable_dueling_network=True,
               policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])

if model_exist:
    # Resume from a checkpoint; restart exploration at a lower epsilon
    # (0.5 instead of 1.0) because the loaded weights are already
    # partially trained.
    dqn.load_weights(model_path)
    dqn.policy = LinearAnnealedPolicy(EpsGreedyQPolicy(),
                                      attr='eps',
                                      value_max=.5,
                                      value_min=.1,
                                      value_test=.05,
                                      nb_steps=5000)

# Restrict the environment to the training slice of the data, then train.
# action_repetition=5: each chosen action is applied for 5 consecutive
# environment steps.
env.set_data_interval(train_start, train_end)
train_history = dqn.fit(env,
                        nb_steps=5000,
                        visualize=False,
                        verbose=2,
                        action_repetition=5)