Example #1
import numpy as np

# Fill the replay memory with random-action experience before training.
# Assumes Simulation, nodes, pretrain_length, actions, and memory are defined elsewhere.
def pretrain():
    sim = Simulation(nodes)
    state = sim.get_state()
    for i in range(pretrain_length):
        a = np.random.randint(0, len(actions)) # Random action
        new_state, reward, done = sim.step(actions[a])

        if done:
            # We finished the episode
            new_state = np.zeros(state.shape) # Mark the terminal state with a zero vector
            memory.add((state, a, reward, new_state, done)) # Add experience to memory
            sim = Simulation(nodes) # Start a new episode
            state = sim.get_state() # First we need a state

        else:
            memory.add((state, a, reward, new_state, done)) # Add experience to memory
            state = new_state # Our state is now the next_state
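
Both examples push experiences into `memory` with `memory.add(...)` and, in Example #2, draw one back out with `memory.sample()`, but the buffer itself is not shown. Below is a minimal sketch of a compatible replay memory, assuming `sample()` returns a single stored transition; the class name `Memory` and the `max_size` parameter are illustrative, not taken from the original code.

import random
from collections import deque

class Memory:
    """Minimal replay buffer: stores transitions and hands back a random one."""

    def __init__(self, max_size=10000):
        self.buffer = deque(maxlen=max_size)    # Oldest transitions are discarded automatically

    def add(self, experience):
        # experience is a (state, action_index, reward, new_state, done) tuple
        self.buffer.append(experience)

    def sample(self):
        # Return one random transition; a full DQN would typically sample a minibatch
        return random.choice(self.buffer)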
Example #2
    state = sim.get_state()                         # Initial state
    done = False
    iteration = 0

    while not done:

        eps = 1 / math.sqrt(iteration + 1)          # Gradually decrease exploration rate

        if np.random.random() < eps:
            a = np.random.randint(0, len(actions))  # Explore by picking a random action
        else:
            a = np.argmax(model.predict(state))     # Use network to predict which action to take

        action = actions[a]
        new_state, reward, done = sim.step(action)  # Use selected action to update the environment

        if done:
            new_state = np.zeros(state.shape)       # Mark the terminal state with a zero vector

        memory.add((state, a, reward, new_state, done))  # Add experience to memory
        state = new_state                           # Our state is now the next state
        iteration += 1

        # Train on a transition sampled from memory; separate names keep the
        # live environment state and done flag from being overwritten
        mem_state, mem_a, mem_reward, mem_new_state, mem_done = memory.sample()

        if mem_done:
            Q_target = mem_reward                                            # No future reward after a terminal state
        else:
            Q_target = mem_reward + y * np.max(model.predict(mem_new_state)) # Discounted Q-value based on the new state
        Q_values = model.predict(mem_state)[0]                               # Get both Q-values for this state
        Q_values[mem_a] = Q_target                                           # Update Q-value for the action taken

        model.fit(mem_state, Q_values.reshape(-1, 2), epochs=1, verbose=0)   # Fit neural network to predict the updated Q-values
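
The training loop only assumes that `model` exposes `predict` and `fit` and outputs two Q-values (implied by `Q_values.reshape(-1, 2)`). The following is a minimal sketch of a compatible network, assuming Keras and a hypothetical `state_size` for the input dimension; neither the layer sizes nor `state_size` come from the original code.

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

state_size = 4                              # Hypothetical dimension of the state returned by sim.get_state()
n_actions = 2                               # Two Q-values, matching Q_values.reshape(-1, 2) above

model = Sequential([
    Dense(32, activation="relu", input_shape=(state_size,)),
    Dense(32, activation="relu"),
    Dense(n_actions, activation="linear"),  # One Q-value per action
])
model.compile(optimizer="adam", loss="mse") # MSE loss matches the Q-target regression in the loop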