def run(weights_filename, nb_episodes):
    """Evaluate a trained dueling-DQN agent with on-screen rendering.

    Rebuilds the environment and network from the weights filename,
    loads the saved weights, and runs ``nb_episodes`` visualized test
    episodes.

    Args:
        weights_filename: Path to the saved weights file; the environment
            name is expected as the third underscore-separated token.
        nb_episodes: Number of evaluation episodes to run.
    """
    # Recover the environment name from the filename convention
    # "<a>_<b>_<env-name>_..." and rebuild the matching model.
    env_name = weights_filename.split("_")[2]
    env = create_environment(env_name)
    obs_shape = env.observation_space.shape
    n_actions = env.action_space.n
    net = build_model(obs_shape, n_actions)

    # keras-rl's DQNAgent requires a replay memory even though no
    # training happens during evaluation.
    replay = SequentialMemory(limit=1000000, window_length=1)

    # Assemble the dueling DQN agent around the rebuilt network.
    agent = DQNAgent(
        model=net,
        nb_actions=n_actions,
        memory=replay,
        enable_dueling_network=True,
        dueling_type='avg',
    )
    # keras-rl expects the agent to be compiled (and to have a target
    # model) before load_weights()/test() can be called.
    agent.target_model = agent.model
    agent.compile(optimizers.Adam(lr=.00025), metrics=['mae'])

    agent.load_weights(weights_filename)

    print("Testing:", weights_filename)

    # Run the rendered evaluation episodes.
    agent.test(env, nb_episodes=nb_episodes, visualize=True)
def main():
    """Evaluate a trained car-racing DQN agent.

    Expects exactly one command-line argument: the path to a saved
    weights file. The gym environment id is parsed from that filename,
    the model is rebuilt, the weights are loaded, and 10 visualized
    test episodes are run.
    """
    # Exactly one argument (the weights file) is required.
    if len(sys.argv) != 2:
        # BUG FIX: a usage error previously printed to stdout and
        # exited with status 0 (success). Report on stderr and exit
        # non-zero so shell scripts can detect the failure.
        print("Must provide weights file-name.", file=sys.stderr)
        sys.exit(1)
    weights_filename = sys.argv[1]

    # Get the environment and extract the number of actions.
    # NOTE(review): this takes token [1] of the filename while run()
    # above takes token [2] — confirm which filename convention is
    # actually in use.
    environment_name = weights_filename.split("_")[
        1]  #"Duckietown-straight_road-v0"
    environment = gym.make(environment_name)
    environment = CarRacingDiscreteWrapper(environment)
    np.random.seed(666)
    nb_actions = environment.action_space.n

    # Build the model to match the training-time input stacking.
    model = build_model((WINDOW_LENGTH, ) + INPUT_SHAPE, nb_actions)
    print(model.summary())

    # keras-rl's DQNAgent requires a replay memory even though no
    # training happens during evaluation.
    memory = SequentialMemory(
        limit=1000000,
        window_length=WINDOW_LENGTH)

    # Create the processor (frame pre-processing for the agent).
    processor = CarRacingProcessor()

    # Create the DQN-Agent.
    dqn = DQNAgent(
        model=model,
        nb_actions=nb_actions,
        memory=memory,
        processor=processor,
    )
    # keras-rl expects the agent to be compiled (and to have a target
    # model) before load_weights()/test() can be called.
    dqn.target_model = dqn.model
    dqn.compile(optimizers.Adam(lr=.00025),
                metrics=['mae'])

    # Load the weights.
    dqn.load_weights(weights_filename)

    # Test the agent with rendering enabled.
    dqn.test(environment, nb_episodes=10, visualize=True)
def main():
    """Evaluate a trained lawnmower DQN agent.

    Expects exactly one command-line argument: the path to a saved
    weights file. Builds the fixed ``lawnmower-medium-obstacles-v0``
    environment, loads the weights, and runs 10 visualized test
    episodes.

    NOTE(review): this is the second ``def main()`` in this file and
    silently shadows the one defined above — only this definition is
    callable after import. Confirm whether both were meant to coexist
    (e.g. in separate scripts).
    """
    # Only works when you provide a file that contains trained weights.
    if len(sys.argv) != 2:
        # BUG FIX: a usage error previously printed to stdout and
        # exited with status 0 (success). Report on stderr and exit
        # non-zero so shell scripts can detect the failure.
        print("Must provide weights file-name.", file=sys.stderr)
        sys.exit(1)

    # Get the environment and extract the number of actions.
    environment_name = "lawnmower-medium-obstacles-v0"
    environment = gym.make(environment_name)
    environment.print_description()
    nb_actions = environment.action_space.n

    # Build the model to match the training-time input stacking.
    model = build_model((WINDOW_LENGTH, ) + INPUT_SHAPE, nb_actions)
    print(model.summary())

    # keras-rl's DQNAgent requires a replay memory even though no
    # training happens during evaluation.
    memory = SequentialMemory(
        limit=1000000,
        window_length=WINDOW_LENGTH)

    # Create the processor (frame pre-processing for the agent).
    processor = LawnmowerProcessor()

    # Create the DQN-Agent.
    dqn = DQNAgent(
        model=model,
        nb_actions=nb_actions,
        memory=memory,
        processor=processor,
    )
    # keras-rl expects the agent to be compiled (and to have a target
    # model) before load_weights()/test() can be called.
    dqn.target_model = dqn.model
    dqn.compile(optimizers.Adam(lr=.00025),
                metrics=['mae'])

    # Load the weights.
    weights_filename = sys.argv[1]
    dqn.load_weights(weights_filename)

    # Test the agent with rendering enabled.
    dqn.test(environment, nb_episodes=10, visualize=True)