Example #1
# imports assumed by this snippet (Keras and keras-rl)
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
from rl.callbacks import FileLogger

# serialize the model architecture to JSON
model_save = model.to_json()
with open("save/NNmodel1.json", "w") as json_file:
    json_file.write(model_save)

print("Saved model to disk!")

# Finally, configure and compile the agent. Any built-in Keras optimizer
# (and any Keras metrics) can be used here.
memory = SequentialMemory(limit=10000, window_length=1)  # replay buffer of the last 10,000 transitions
#policy1 = BoltzmannQPolicy()
# EpsDisGreedyQPolicy appears to be a project-specific decaying epsilon-greedy
# policy; it is not one of keras-rl's built-ins.
policy1 = EpsDisGreedyQPolicy(eps=0.0001, eps_decay=0.999)
policy2 = BoltzmannQPolicy()

# log per-episode training metrics to a JSON file (flushed every episode)
callback12 = FileLogger(filepath='save/history12_{}'.format(timenow),
                        interval=1)

dqn8 = DQNAgent(model=model,
                nb_actions=nb_actions,
                memory=memory,
                gamma=0.99,             # discount factor
                nb_steps_warmup=2000,   # steps collected before learning starts
                target_model_update=1,  # hard-copy the target network every step
                policy=policy1)
dqn8.compile(Adam(lr=1e-3), metrics=['mae'])
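# As the comment above notes, any built-in Keras optimizer and metrics work
# here; an illustrative alternative (not in the original) would be:
#   from keras.optimizers import RMSprop
#   dqn8.compile(RMSprop(lr=1e-3), metrics=['mae', 'mse'])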
history8 = dqn8.fit(env,
                    nb_steps=3000,  # keras-rl's fit() expects nb_steps (total training steps)
                    visualize=False,
                    callbacks=[callback12],
                    verbose=2)
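
# Not part of the original snippet: after training, the agent could be
# evaluated and its learned weights stored. The episode count and weights
# filename below are assumptions.
dqn8.test(env, nb_episodes=5, visualize=False)
dqn8.save_weights('save/dqn8_weights_{}.h5f'.format(timenow), overwrite=True)
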
Example #2
model.add(Activation('relu'))
model.add(Dense(nb_actions))     # one output unit per action
model.add(Activation('linear'))  # linear activations so outputs are raw Q-values
print(model.summary())

# serialize model to JSON
model_save = model.to_json()
with open("save/NNmodel1.json", "w") as json_file:
    json_file.write(model_save)

print("Saved model to disk!")

memory = SequentialMemory(limit=100000, window_length=1)
policy1 = BoltzmannQPolicy()
policy2 = EpsGreedyQPolicy()
callback1 = FileLogger(filepath='save/history1_{}'.format(timenow), interval=1)
callback2 = FileLogger(filepath='save/history2_{}'.format(timenow), interval=1)
callback3 = FileLogger(filepath='save/history3_{}'.format(timenow), interval=1)
callback4 = FileLogger(filepath='save/history4_{}'.format(timenow), interval=1)
# the runs below are disabled (commented out) with a triple-quoted string:
'''
dqn1 = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000,
               target_model_update=1e-2, policy=policy1)
dqn1.compile(Adam(lr=1e-3), metrics=['mae'])
history1 = dqn1.fit(env, nb_steps=100, visualize=False, callbacks=[callback1], verbose=2)

dqn2 = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000,
               target_model_update=1e-2, policy=policy2)
dqn2.compile(Adam(lr=1e-3), metrics=['mae'])
history2 = dqn2.fit(env, nb_steps=100, visualize=False, callbacks=[callback2], verbose=2)

dqn3 = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000,