import os

import numpy as np

# Environment and Simulator are assumed to live in the project's own modules
# (adjust the import paths if they differ); QAgent is assumed to be defined
# elsewhere in this file.
from environment import Environment
from simulator import Simulator


def run():
    """Grid-search epsilon, alpha and gamma for the Q-learning agent and log the results."""
    # Build the path for the tuning report next to this script (the file itself is created on the first write below)
    target_dir = os.path.dirname(os.path.realpath(__file__))
    target_path = os.path.join(target_dir, 'qlearning_tuning_report.txt')
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    # Sweep the exploration rate (epsilon), learning rate (alpha) and discount factor (gamma)
    for epsilon in [0.1, 0.5, 0.9]:
        for alpha in np.arange(0.1, 1, 0.2):  # 0.1, 0.3, 0.5, 0.7, 0.9
            for gamma in np.arange(0.1, 1, 0.2):
                print('epsilon={}, alpha={}, gamma={}'.format(epsilon, alpha, gamma))
                # Set up environment and agent
                e = Environment()  # create environment (also adds some dummy traffic)
                a = e.create_agent(QAgent, epsilon, alpha, gamma)  # create agent
                e.set_primary_agent(a, enforce_deadline=True)  # specify agent to track
                # NOTE: You can set enforce_deadline=False while debugging to allow longer trials

                # Now simulate it
                sim = Simulator(e, update_delay=0.001, display=False)  # create simulator (uses pygame when display=True, if available)
                # NOTE: To speed up simulation, reduce update_delay and/or set display=False
                sim.run(n_trials=100)  # run for a specified number of trials
                # sim.report() is expected to return the number of successful trials and the average running time
                summary = sim.report()
                
                # Append the results for this parameter combination to the report
                with open(target_path, 'a') as f:
                    f.write('epsilon {}, alpha {}, gamma {} : success {}, avg_time {}, total_reward {}\n'.format(
                        epsilon, alpha, gamma, summary[0], summary[1], round(a.total_reward, 3)))
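

# A minimal sketch (not part of the original project) of how the report written above
# could be read back to pick the best-performing parameter combination. It assumes the
# exact 'key value' line format emitted by run() and treats the success count as the
# primary score; summarize_report is a hypothetical helper name.
def summarize_report(path=None):
    """Return the report line with the highest success count, or None if no line parses."""
    if path is None:
        path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            'qlearning_tuning_report.txt')
    best_line, best_success = None, float('-inf')
    with open(path) as f:
        for line in f:
            # Each line looks like:
            # "epsilon 0.1, alpha 0.3, gamma 0.5 : success 87, avg_time 12.3, total_reward 20.5"
            try:
                success = float(line.split('success')[1].split(',')[0])
            except (IndexError, ValueError):
                continue  # skip malformed lines
            if success > best_success:
                best_line, best_success = line.strip(), success
    return best_line


if __name__ == '__main__':
    run()  # run the parameter sweep when this file is executed as a script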