Example #1
import random
import numpy as np
import pickle

from env import SoccerEnv
from soccer_stat import SoccerStat

""" This file provides a baseline by using two random agents
"""

# set environment
env = SoccerEnv(width=5, height=5, goal_size=3)

# parameters
EPISODES = 5000

# statistic
stat = SoccerStat()

for i in range(EPISODES):
    state = env.reset()
    stat.set_initial_ball(state[4])

    rewardL = 0
    rewardR = 0
    done = False
    while not done:
        # agent 1 decides its action
        actionL = random.randint(0, env.act_dim-1)

        # agent 2 decides its action
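        # completion sketch: like agent 1, agent 2 also acts at random
        actionR = random.randint(0, env.act_dim-1)

        # advance the environment one step; this return signature is an
        # assumption, inferred from the env.step(...) call in Example #2
        done, rL, rR, state, actions = env.step(actionL, actionR)
        rewardL += rL
        rewardR += rR

    # SoccerStat's per-episode update is not shown in this excerpt, so only
    # the accumulated rewards are reported here
    print(f"episode {i}: rewardL={rewardL}, rewardR={rewardR}")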
Example #2
from env import SoccerEnv
from agents.common.training_opponent import StationaryOpponent, RandomSwitchOpponent, RLBasedOpponent

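# eight movement directions; presumably the action encoding expected by env.step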
TOP = 0
TOP_RIGHT = 1
RIGHT = 2
BOTTOM_RIGHT = 3
BOTTOM = 4
BOTTOM_LEFT = 5
LEFT = 6
TOP_LEFT = 7

env = SoccerEnv()
agentOP = StationaryOpponent(env_width=env.width, env_height=env.height, env_goal_size=env.goal_size)

state = env.reset()

# interaction loop (the original snippet left the left agent's action as a
# "type action here!" placeholder; RIGHT is used here so the code runs)
for i in range(10):
    env.show()

    # the opponent chooses its action from the current state
    actionOP = agentOP.get_action(state)
    print(actionOP)

    # env.step takes (left action, right/opponent action)
    done, reward_l, reward_r, state, actions = env.step(RIGHT, actionOP)

    # let the opponent adjust its behaviour from the outcome
    agentOP.adjust(done, reward_r, i)
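
The import above also pulls in RandomSwitchOpponent and RLBasedOpponent. The sketch below shows how either could replace the stationary opponent; it assumes they accept the same constructor arguments as StationaryOpponent, which is not shown in this excerpt.

# hypothetical swap: assumes the same constructor signature as StationaryOpponent
agentOP = RandomSwitchOpponent(env_width=env.width, env_height=env.height, env_goal_size=env.goal_size)
# agentOP = RLBasedOpponent(env_width=env.width, env_height=env.height, env_goal_size=env.goal_size)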