Code example #1
File: environment.py Project: georginlau/easy21-1
 def __init__(self):
     self.card_min = 1  # min absolute val of card
     self.card_max = 10  # max absolute val of card
     self.dl_values = 10  # possible values for dl in state
     self.pl_values = 21  # possible values for pl in state
     self.act_values = len(
         Actions.get_values())  # number of possible actions
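
The snippets on this page all rely on an Actions helper with hit/stick members plus get_values() and get_action() methods, whose definition is not shown here. The following is only a minimal sketch of an implementation consistent with how it is used, assuming the two actions are encoded as the integers 0 and 1:

# Hypothetical sketch of the Actions helper assumed by these examples; the
# integer encoding and the method bodies are inferred, not taken from the projects.
class Actions:
    hit = 0
    stick = 1

    @staticmethod
    def get_values():
        # Every action encoding; used to iterate over actions and to size Q arrays
        return [Actions.hit, Actions.stick]

    @staticmethod
    def get_action(index):
        # Map an integer index (e.g. the result of np.argmax) back to an action
        return Actions.get_values()[index]
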
Code example #2
File: agent.py Project: ruixu93/easy21
    def eps_greedy_choice_linear(self, state, epsilon):

        Qa = np.zeros(2)

        # epsilon greedy policy
        if random.random() > epsilon:
            for action in Actions.get_values():
                phi = self.feature_computation(state, action)
                Qa[action] = sum(phi*self.theta)
            a_next = Actions.get_action(np.argmax(Qa))
        else:
            a_next = Actions.hit if random.random()<0.5 else Actions.stick
        phi = self.feature_computation(state, a_next)
        my_Qa = sum(phi*self.theta)
        return [a_next, my_Qa]
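
In eps_greedy_choice_linear the action value is the linear estimate Q(s, a) = theta · phi(s, a): with probability 1 - epsilon the method takes the action with the larger estimate, otherwise it picks hit or stick uniformly at random, and it returns both the chosen action and its value estimate. Written as a standalone rule over precomputed action values, the same selection looks roughly like the sketch below (the function name and signature are illustrative, not from the project):

import random
import numpy as np

def eps_greedy(q_values, epsilon):
    # Exploit with probability 1 - epsilon, otherwise explore uniformly at random.
    # q_values is a 1-D array with one value estimate per action.
    if random.random() > epsilon:
        return int(np.argmax(q_values))     # greedy action
    return random.randrange(len(q_values))  # random action
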
Code example #3
File: environment.py Project: ruixu93/easy21
 def __init__(self):
     self.card_min = 1    # min absolute val of card
     self.card_max = 10   # max absolute val of card
     self.dl_values = 10  # possible values for dl in state
     self.pl_values = 21  # possible values for pl in state
     self.act_values = len(Actions.get_values())  # number of possible actions
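
With two actions, these sizes describe a tabular action-value array of shape (10, 21, 2), which is how self.Q is laid out in the TD_control_linear example below. A minimal sketch of that layout, assuming the zero-based dealer/player indexing implied by self.Q[d, p, :] in that example:

import numpy as np

dl_values, pl_values, act_values = 10, 21, 2   # dealer showing card, player sum, hit/stick
Q = np.zeros((dl_values, pl_values, act_values))

# State-value function obtained by taking the greedy action value in each state,
# as TD_control_linear does when it fills self.V at the end.
V = Q.max(axis=2)
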
Code example #4
File: agent.py Project: ruixu93/easy21
    def TD_control_linear(self, iterations, mlambda, avg_it):

        self.mlambda = float(mlambda)
        self.iter = iterations
        self.method = "Sarsa_control_linear_approx"

        epsilon = 0.05
        alpha = 0.01

        l_mse = 0
        e_mse = np.zeros((avg_it,self.iter))
        monte_carlo_Q = pickle.load(open("Data/Qval_func_1000000_MC_control.pkl", "rb"))
        n_elements = monte_carlo_Q.shape[0]*monte_carlo_Q.shape[1]*2

        for my_it in xrange(avg_it):

            self.Q = np.zeros((self.env.dl_values, self.env.pl_values, self.env.act_values))
            self.LinE = np.zeros(len(self.d_edges)*len(self.p_edges)*2)
            self.theta = np.random.random(36)*0.2
            #self.theta = np.zeros(len(self.d_edges)*len(self.p_edges)*2)
            count_wins = 0

            # Loop over episodes (complete game runs)
            for episode in xrange(self.iter):

                self.LinE = np.zeros(36)
                s = self.env.get_initial_state()

                if np.random.random() < 1-epsilon:
                    Qa = -100000
                    a = None
                    for act in Actions.get_values():
                        phi_curr = self.feature_computation(s,act)
                        Q = sum(self.theta*phi_curr)
                        if Q > Qa:
                            Qa = Q
                            a = act
                            phi = phi_curr
                else:
                    a = Actions.stick if np.random.random()<0.5 else Actions.hit
                    phi = self.feature_computation(s,a)
                    Qa = sum(self.theta*phi)

                # Execute until game ends
                while not s.term:

                    # Accumulating traces
                    self.LinE[phi==1] += 1

                    # execute action
                    s_next = self.env.step(s, a)

                    # compute delta
                    delta = s_next.rew - sum(self.theta*phi)

                    # choose next action with epsilon greedy policy
                    if np.random.random() < 1-epsilon:
                        Qa = float(-100000)
                        a = None
                        for act in Actions.get_values():
                            phi_curr = self.feature_computation(s_next,act)
                            Q = sum(self.theta*phi_curr)
                            if Q > Qa:
                                Qa = Q
                                a = act
                                phi = phi_curr
                    else:
                        a = Actions.stick if np.random.random()<0.5 else Actions.hit
                        phi = self.feature_computation(s_next,a)
                        Qa = sum(self.theta*phi)

                    # delta
                    delta += Qa
                    self.theta += alpha*delta*self.LinE
                    self.LinE = self.mlambda*self.LinE

                    # reassign s and a
                    s = s_next

                #if episode%10000==0: print "Episode: %d, Reward: %d" %(episode, s_next.rew)
                count_wins = count_wins+1 if s_next.rew==1 else count_wins

                self.Q = self.deriveQ()
                e_mse[my_it, episode] = np.sum(np.square(self.Q-monte_carlo_Q))/float(n_elements)

            print float(count_wins)/self.iter*100

            self.Q = self.deriveQ()
            l_mse += np.sum(np.square(self.Q-monte_carlo_Q))

        if mlambda==0 or mlambda==1:
            plt.plot(e_mse.mean(axis=0))
            plt.ylabel('mse vs episodes')
            plt.show()
        # Derive value function
        for d in xrange(self.env.dl_values):
            for p in xrange(self.env.pl_values):
                self.V[d,p] = max(self.Q[d, p, :])

        #print self.theta

        return l_mse/float(n_elements)
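
The loop above is Sarsa(lambda) with linear function approximation over binary features: the TD error is delta = r + Q(s', a') - Q(s, a) (gamma = 1 in Easy21), the accumulating trace grows by phi(s, a) on every step, the weights move by alpha * delta along the trace, and the trace is then decayed by lambda. The project code targets Python 2 (xrange, the print statement); the Python 3 sketch below isolates a single update step, with a function name and signature that are illustrative rather than taken from the project:

import numpy as np

def sarsa_lambda_step(theta, trace, phi, reward, phi_next, alpha, lam):
    # One Sarsa(lambda) update with linear function approximation and gamma = 1.
    # phi and phi_next are the (binary) feature vectors of the current and next
    # state-action pairs; phi_next should be all zeros when the next state is terminal.
    trace = trace + phi                              # accumulating eligibility trace
    delta = reward + theta @ phi_next - theta @ phi  # TD error
    theta = theta + alpha * delta * trace            # move the weights along the trace
    trace = lam * trace                              # decay the trace for the next step
    return theta, trace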