def run(genes, norm_inputs, en=1):
    env = Environment(experiment_name=experiment_name,
                      playermode="ai",
                      player_controller=player_0(normalize=norm_inputs),
                      speed="fastest",
                      enemymode="static",
                      level=2,
                      randomini="no",
                      enemies=[en])
    return env.play(genes)
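# Usage sketch (assumptions: numpy is imported as np, player_0 consumes a flat
# weight vector of length (n_sensors + 1) * n_hidden + (n_hidden + 1) * 5, and
# env.play returns (fitness, player_life, enemy_life, time) as in evoman).
# The sizes below are illustrative for 20 sensors and 10 hidden neurons.
n_vars = (20 + 1) * 10 + (10 + 1) * 5      # = 265 weights
genes = np.random.uniform(-1, 1, n_vars)   # random candidate controller
f, p, e, t = run(genes, norm_inputs=True, en=2)
print(f"fitness={f:.2f} player={p:.1f} enemy={e:.1f} time={t}")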
enemies = [1, 2, 3, 4, 5, 6, 7, 8]
lifes = np.empty((10, 8), dtype=("float64", (2, )))
log = []

for run in range(1, 11):
    # load best network
    ctr = np.loadtxt(f"./GENERALIST/EA/best/run_{run}.txt")
    gain = 0
    for counter, enemy in enumerate(enemies):
        env = Environment(
            experiment_name=experiment_name,
            enemies=[enemy],
            # multiplemode='yes',
            playermode="ai",
            player_controller=player_controller(),
            enemymode="static",
            level=2,
            speed="fastest",
            logs="off")

        # stats per enemy
        player_life = 0
        enemy_life = 0
        for i in range(5):
            p, e = fitness(ctr)
            player_life += float(p)
            enemy_life += float(e)
        gain += (player_life - enemy_life) / 5.0
        lifes[run - 1, counter] = [player_life / 5, enemy_life / 5]
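# Sketch of the fitness() helper called in the loop above; its definition is not
# part of this snippet, so the body below is an assumption: play one game with
# the loaded weights and return player life and enemy life.
def fitness(ctr):
    f, p, e, t = env.play(pcont=ctr)
    return p, e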
# Load solution and specify values
bsol = np.loadtxt(experiment_name + '/Solution1.txt')  # txt file with best solution for 1 enemy with 1 EA for a specific run
#print(bsol)
enemy_nr = 1  # enemy the solution was created for
EA_name = "EA1"  # EA that was used to create the solution
run_nr = 1  # 1 to 10

df = pd.DataFrame(columns=[
    "Enemy", "Algorithm", "Run", "Repetition", "Energy enemy", "Energy player"
])

# Environment
n_hidden_neurons = 10
env = Environment(experiment_name=experiment_name,
                  enemies=[enemy_nr],
                  playermode="ai",
                  player_controller=player_controller(n_hidden_neurons),
                  enemymode="static",
                  level=2,
                  speed="fastest")

# run 5 times and add to df
for i in range(0, 5):
    f, p, e, t = env.play(pcont=bsol)
    df.loc[i] = [enemy_nr, EA_name, run_nr, i + 1, e, p]

# write data to file
print(df)
df.to_csv("df_boxplot.csv")
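# Follow-up sketch (assumption): read df_boxplot.csv back and boxplot the
# individual gain (player energy minus enemy energy) over the 5 repetitions.
import matplotlib.pyplot as plt

df = pd.read_csv("df_boxplot.csv")
gain = df["Energy player"] - df["Energy enemy"]
plt.boxplot(gain.values)
plt.ylabel("Individual gain (player energy - enemy energy)")
plt.title(f"Enemy {enemy_nr}, {EA_name}, run {run_nr}")
plt.show()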
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.insert(0, 'evoman')

from evoman.environment import Environment
from demo_controller import player_controller, enemy_controller
from random import sample

ENV = Environment(experiment_name="test",
                  enemies=[2],
                  playermode="ai",
                  player_controller=player_controller(),
                  enemy_controller=enemy_controller(),
                  level=2,
                  speed="fastest",
                  contacthurt='player',
                  logs='off')


class Individual:
    dom_u = 1
    dom_l = -1
    mutation_rate = .3
    n_hidden = 10
    # weights of a multilayer controller with n_hidden hidden neurons and 5 outputs
    n_vars = (ENV.get_num_sensors() + 1) * n_hidden + (n_hidden + 1) * 5

    def __init__(self):
        self.age = 0
        self.weights = list()
        self.fitness = None
        self.enemy_life = None
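# Sketch of helpers the Individual class implies but does not show; the names
# random_individual and evaluate are illustrative, not part of the original code.
def random_individual():
    ind = Individual()
    ind.weights = list(np.random.uniform(Individual.dom_l, Individual.dom_u,
                                         Individual.n_vars))
    return ind


def evaluate(ind):
    # evoman's play() returns (fitness, player_life, enemy_life, time)
    f, p, e, t = ENV.play(pcont=np.array(ind.weights))
    ind.fitness = f
    ind.enemy_life = e
    return f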
last_best = 0
n_hidden_neurons = 10  # hidden neurons in the controller (the output layer has 5 possible actions)
budget = 1500
runs = 10
envs = []
eatype = "PSO"

# initializes environment with ai player using random controller, playing against static enemy
enemies_g_1 = [7, 8]
enemies_g_2 = [2, 5, 6]

env_1 = Environment(experiment_name=experiment_name,
                    enemies=enemies_g_1,
                    multiplemode="yes",
                    playermode="ai",
                    player_controller=player_controller(n_hidden_neurons),
                    enemymode="static",
                    level=2,
                    speed="fastest",
                    timeexpire=budget)

env_2 = Environment(experiment_name=experiment_name,
                    enemies=enemies_g_2,
                    multiplemode="yes",
                    playermode="ai",
                    player_controller=player_controller(n_hidden_neurons),
                    enemymode="static",
                    level=2,
                    speed="fastest",
                    timeexpire=budget)
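# Sketch: evaluating one candidate on both enemy groups. Averaging the two
# multi-enemy fitness values is an assumption; the PSO's actual objective is
# not part of the snippet above.
def evaluate(weights):
    f1, p1, e1, t1 = env_1.play(pcont=weights)
    f2, p2, e2, t2 = env_2.play(pcont=weights)
    return (f1 + f2) / 2.0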
import os
import sys

from demo_controller import player_controller
from evoman.environment import Environment
import numpy as np
# import time

experiment_name = "GENERALIST"
if not os.path.exists(experiment_name):
    os.makedirs(experiment_name)

ENEMY = [1, 2, 3, 4, 5, 6, 7, 8]
env = Environment(
    experiment_name=experiment_name,
    # multi-enemy fitness aggregation default: values.mean() - values.std()
    multiplemode="yes",
    enemies=ENEMY,
    playermode="ai",
    player_controller=player_controller(),
    enemymode="static",
    level=2,
    speed="fastest",
    # avoid print statements for SPOT
    logs="off")

IND_SIZE = (env.get_num_sensors() + 1) * 10 + (10 + 1) * 5
RUN_MODE = "train"
NGEN = 30
NRUN = 1
UPPER_LIMIT = 1.0
LOWER_LIMIT = -1.0
m = sys.argv[1]
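# Note on the aggregation comment above: with multiplemode="yes" evoman combines
# per-enemy fitness through the environment's cons_multi hook, whose default is
# values.mean() - values.std(). The override below (a plain mean) is only a
# sketch of how that default could be changed; it is an assumption, not part of
# the original setup.
def cons_multi_mean(values):
    return values.mean()

# env.cons_multi = cons_multi_mean   # uncomment to aggregate with the mean only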
gens = 120
mate = 1
mutation = 0.2
last_best = 0
n_hidden_neurons = 5  # hidden neurons in the controller (its output layer has 5 possible actions)
budget = 500
enemies = 3
runs = 10
envs = []
eatype = "Roulette"

# initializes environment with ai player using random controller, playing against static enemy
for x in range(1, enemies + 1):
    temp = Environment(experiment_name=experiment_name,
                       enemies=[x],
                       playermode="ai",
                       player_controller=player_controller(n_hidden_neurons),
                       enemymode="static",
                       level=2,
                       speed="fastest",
                       timeexpire=budget)
    envs.append(temp)
env = envs[0]

n_weights = (env.get_num_sensors() + 1) * n_hidden_neurons + (n_hidden_neurons + 1) * 5

creator.create('FitnessBest', base.Fitness, weights=(1.0, ))
# player_life and enemy_life start as None and are filled in after evaluation
creator.create('Individual', np.ndarray, fitness=creator.FitnessBest,
               player_life=None, enemy_life=None)
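# Sketch of the DEAP toolbox that could follow the creator calls above
# (assumes `from deap import base, creator, tools`; the registrations and
# operator settings are illustrative, with tools.selRoulette matching
# eatype = "Roulette").
toolbox = base.Toolbox()
toolbox.register("individual",
                 lambda: creator.Individual(np.random.uniform(-1.0, 1.0, n_weights)))
toolbox.register("population", tools.initRepeat, list, toolbox.individual)


def evaluate(individual):
    f, p, e, t = env.play(pcont=np.asarray(individual))
    individual.player_life = p
    individual.enemy_life = e
    return (f, )


toolbox.register("evaluate", evaluate)
toolbox.register("mutate", tools.mutGaussian, mu=0.0, sigma=0.5, indpb=mutation)
toolbox.register("select", tools.selRoulette)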
import os
import sys

from demo_controller import player_controller
from evoman.environment import Environment
import numpy as np
# import time

experiment_name = "EA"
if not os.path.exists(experiment_name):
    os.makedirs(experiment_name)

ENEMY = 3
env = Environment(
    experiment_name=experiment_name,
    enemies=[ENEMY],
    playermode="ai",
    player_controller=player_controller(),
    enemymode="static",
    level=2,
    speed="fastest",
    # avoid print statements for SPOT
    logs="off")

IND_SIZE = (env.get_num_sensors() + 1) * 10 + (10 + 1) * 5
RUN_MODE = "train"
NGEN = 20
NRUN = 1
UPPER_LIMIT = 1.0
LOWER_LIMIT = -1.0
m = sys.argv[1]
m = eval(m.split()[0])  # first whitespace-separated token of argv[1] is the parameter value
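# Sketch of the SPOT parameter handoff implied by the last two lines
# (assumption: SPOT invokes the script with the candidate parameter(s) as a
# whitespace-separated string in argv[1] and m is a single numeric value; a
# plain float() parse avoids executing arbitrary input).
def parse_spot_param(arg):
    return float(arg.split()[0])

# m = parse_spot_param(sys.argv[1])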