Example 1
        env = MicroRTSStatsRecorder(env, args.gamma)
        if args.capture_video:
            if idx == 0:
                env = Monitor(env, f'videos/{experiment_name}')
        env.seed(seed)
        env.action_space.seed(seed)
        env.observation_space.seed(seed)
        return env

    return thunk


# envs = VecPyTorch(DummyVecEnv([make_env(args.gym_id, args.seed+i, i) for i in range(args.num_envs)]), device)
envs = MicroRTSVecEnv(
    num_envs=args.num_envs,
    render_theme=2,
    ai2s=[microrts_ai.passiveAI for _ in range(args.num_envs)],
    map_path="maps/16x16/basesWorkers16x16.xml",
    reward_weight=np.array([10.0, 1.0, 1.0, 0.2, 1.0, 4.0]))
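# Note (assumption, not stated in this snippet): the six reward_weight entries
# are commonly described as scaling gym-microrts' shaped rewards in the order
# WinLoss, ResourceGather, ProduceWorker, ProduceBuilding, Attack,
# ProduceCombatUnit.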
envs = VecMonitor(envs)
envs = VecPyTorch(envs, device)
# if args.prod_mode:
#     envs = VecPyTorch(
#         SubprocVecEnv([make_env(args.gym_id, args.seed+i, i) for i in range(args.num_envs)], "fork"),
#         device
#     )
assert isinstance(
    envs.action_space,
    MultiDiscrete), "only MultiDiscrete action space is supported"


# ALGO LOGIC: initialize agent here:
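# The excerpt stops at the agent definition. Below is a minimal, hypothetical
# actor-critic sketch for this MultiDiscrete action space -- it is NOT the
# repository's Agent class; the flat encoder and layer sizes are illustrative
# assumptions only (imports repeated here so the sketch is self-contained).
import numpy as np
import torch.nn as nn


class SketchAgent(nn.Module):
    def __init__(self, envs):
        super().__init__()
        obs_size = int(np.prod(envs.observation_space.shape))
        self.encoder = nn.Sequential(nn.Flatten(),
                                     nn.Linear(obs_size, 256),
                                     nn.ReLU())
        # one logit slot per discrete choice in every action component
        self.actor = nn.Linear(256, int(envs.action_space.nvec.sum()))
        self.critic = nn.Linear(256, 1)

    def forward(self, x):
        hidden = self.encoder(x)
        return self.actor(hidden), self.critic(hidden)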
Example 2
                     save_code=True)
    writer = SummaryWriter(f"/tmp/{experiment_name}")

# TRY NOT TO MODIFY: seeding
device = torch.device(
    'cuda' if torch.cuda.is_available() and args.cuda else 'cpu')
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = args.torch_deterministic
envs = MicroRTSVecEnv(
    num_envs=args.num_envs,
    max_steps=2000,
    render_theme=2,
    ai2s=[microrts_ai.coacAI for _ in range(args.num_envs-6)] + \
        [microrts_ai.randomBiasedAI for _ in range(2)] + \
        [microrts_ai.lightRushAI for _ in range(2)] + \
        [microrts_ai.workerRushAI for _ in range(2)],
    map_path="maps/16x16/basesWorkers16x16.xml",
    reward_weight=np.array([10.0, 1.0, 1.0, 0.2, 1.0, 4.0])
)
envs = MicroRTSStatsRecorder(envs, args.gamma)
envs = VecMonitor(envs)
envs = VecPyTorch(envs, device)
if args.capture_video:
    envs = VecVideoRecorder(envs,
                            f'videos/{experiment_name}',
                            record_video_trigger=lambda x: x % 1000000 == 0,
                            video_length=2000)
assert isinstance(
    envs.action_space,
    MultiDiscrete), "only MultiDiscrete action space is supported"
Example 3
import numpy as np
import gym
import gym_microrts
import time
import traceback
from gym_microrts.envs.vec_env import MicroRTSVecEnv
from gym_microrts import microrts_ai
from gym.envs.registration import register
from gym_microrts import Config

try:
    env = MicroRTSVecEnv(num_envs=1,
                         render_theme=2,
                         ai2s=[microrts_ai.coacAI],
                         map_path="maps/16x16/basesWorkers16x16.xml",
                         reward_weight=np.array(
                             [10.0, 1.0, 1.0, 0.2, 1.0, 4.0]))
    # env = gym.make('MicrortsDefeatCoacAIShaped-v3').env
    # env = gym.wrappers.RecordEpisodeStatistics(env)
    # env.action_space.seed(0)
    obs = env.reset()
    env.render()
except Exception:
    # printStackTrace() exists only on exceptions raised on the Java side of
    # gym-microrts; print a Python traceback instead so Python-side errors are
    # not swallowed by an AttributeError.
    traceback.print_exc()
env.action_space.seed(0)
env.reset()
for i in range(10000):
    env.render()
    action_mask = np.array(env.vec_client.getUnitLocationMasks()).flatten()
    time.sleep(0.001)
    action = env.action_space.sample()
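    # Sketch (assumption, mirroring the manual stepping in Example 6): feed the
    # sampled action back through the vectorized step API. The unit-location
    # mask gathered above is not applied here, so the sampled action may be
    # invalid for the current board state.
    next_obs, reward, done, info = env.step([action])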
Example 4
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from model import Agent, MicroRTSStatsRecorder, VecMonitor, VecPyTorch
import pickle
from gym_microrts.envs.vec_env import MicroRTSVecEnv
from gym_microrts import microrts_ai
import itertools
import os

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

envs = MicroRTSVecEnv(num_envs=1,
                      render_theme=2,
                      ai2s=[microrts_ai.workerRushAI for _ in range(1)],
                      map_path="maps/10x10/basesWorkers10x10.xml",
                      reward_weight=np.array([10.0, 1.0, 1.0, 0.2, 1.0, 4.0]))
envs = MicroRTSStatsRecorder(envs)
envs = VecMonitor(envs)
envs = VecPyTorch(envs, device)


def enjoy(agent):
    rewards = []
    next_obs = envs.reset()
    while True:
        action, logproba, _, _ = agent.get_action(next_obs, envs=envs)
        try:
            next_obs, rs, ds, infos = envs.step(action.T)
            rewards.append(rs)
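# --- Hypothetical usage sketch, not part of the excerpt above: the pickle and
# --- os imports suggest trained agents are deserialized from disk; the helper
# --- and the file name below are illustrative assumptions only.
def load_agent(path):
    # unpickle a trained Agent and move it onto the active device
    with open(path, "rb") as f:
        agent = pickle.load(f)
    return agent.to(device)

# enjoy(load_agent("trained_agent.pkl"))  # hypothetical file name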
Example 5
    "lightRushAI": microrts_ai.lightRushAI,
    "coacAI": microrts_ai.coacAI,
    "naiveMCTSAI": microrts_ai.naiveMCTSAI,
    "mixedBot": microrts_ai.mixedBot,
    "rojo": microrts_ai.rojo,
    "izanagi": microrts_ai.izanagi,
    "tiamat": microrts_ai.tiamat,
    "droplet": microrts_ai.droplet,
    "guidedRojoA3N": microrts_ai.guidedRojoA3N
}
ai_names, ais = list(all_ais.keys()), list(all_ais.values())
ai_match_stats = dict(zip(ai_names, np.zeros((len(ais), 3))))
args.num_envs = len(ais)
envs = MicroRTSVecEnv(num_envs=len(ais),
                      render_theme=2,
                      ai2s=ais,
                      map_path="maps/16x16/basesWorkers16x16A.xml",
                      reward_weight=np.array([10.0, 1.0, 1.0, 0.2, 1.0, 4.0]))
envs = MicroRTSStatsRecorder(envs)
envs = VecMonitor(envs)
envs = VecPyTorch(envs, device)
# if args.prod_mode:
#     envs = VecPyTorch(
#         SubprocVecEnv([make_env(args.gym_id, args.seed+i, i) for i in range(args.num_envs)], "fork"),
#         device
#     )
assert isinstance(
    envs.action_space,
    MultiDiscrete), "only MultiDiscrete action space is supported"
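
# Sketch under an assumption: each row of ai_match_stats is taken here to hold
# [wins, ties, losses] for the agent against the corresponding bot, with the
# usual +1 / 0 / -1 WinLoss outcome convention. This helper is illustrative and
# not part of the source.
def record_outcome(ai_name, winloss):
    # bump the win, tie, or loss counter for the bot this episode was against
    if winloss > 0:
        ai_match_stats[ai_name][0] += 1
    elif winloss < 0:
        ai_match_stats[ai_name][2] += 1
    else:
        ai_match_stats[ai_name][1] += 1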

Example 6
import traceback

import numpy as np
import gym
import gym_microrts

from gym_microrts.envs.vec_env import MicroRTSVecEnv
from gym_microrts import microrts_ai
from gym.envs.registration import register
from gym_microrts import Config

try:
    env = MicroRTSVecEnv(num_envs=1,
                         render_theme=2,
                         ai2=microrts_ai.coacAI,
                         map_path="maps/16x16/basesWorkers16x16.xml",
                         reward_weight=np.array(
                             [10.0, 1.0, 1.0, 0.2, 1.0, 4.0, 0.0]))
    # env = gym.make('MicrortsDefeatCoacAIShaped-v3').env
    # env = gym.wrappers.RecordEpisodeStatistics(env)
    # env.action_space.seed(0)
    obs = env.reset()
    env.render()
except Exception:
    # printStackTrace() exists only on exceptions raised on the Java side of
    # gym-microrts; print a Python traceback instead.
    traceback.print_exc()

# print("unit_locatiuons are at", np.where(env.get_unit_location_mask()==1)[0])

print("reward is", env.step([[17, 2, 0, 3, 0, 1, 2, 123]])[1])
env.render()
# print("unit_locatiuons are at", np.where(env.get_unit_location_mask()==1)[0])
print("reward is", env.step([[34, 4, 1, 2, 1, 2, 3, 109]])[1])
env.render()
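# Note (assumption, based on gym-microrts' flat action encoding rather than on
# anything shown above): each hard-coded 8-component action is usually read as
# [source unit position, action type, move direction, harvest direction,
#  return direction, produce direction, produce unit type, attack position],
# which is why the vectors passed to env.step have exactly eight entries.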