Example #1
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors

import gym
import numpy as np
import os
import sys
sys.path.insert(0, '../../../')
import MultiAgent_Games

sys.path.insert(0, '../../../MultiAgent_GA')
from GA_Config import Config
from GA_Network import Network

config = Config()
config.num_layers = 2
config.num_hidden = 128
config.env_name = 'Bees-v1'
config.a_size = 5

# Build the network and load trained weights from a saved checkpoint
network = Network(config)
weights = np.load('./models/Bees-v1_11/10500.npz')
network.w_in = weights['w_in']
network.w_hidden = weights['w_h']
network.w_out = weights['w_out']

# Create the environment and reduce it to a single bee and a single flower
env = gym.make(config.env_name).unwrapped
env.engine.num_bees = 1
env.engine.num_flowers = 1
s = env.reset()


def plt_model(ind, s, r):
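The checkpoint loaded above is a plain NumPy .npz archive. As a rough illustration (not the project's training code), the sketch below shows how such a file could be written and read back with the same keys the example uses (w_in, w_h, w_out); the array shapes are made-up placeholders.

import numpy as np

# Placeholder shapes; the real dimensions come from the GA_Network configuration.
w_in = np.random.randn(10, 128)
w_h = np.random.randn(128, 128)
w_out = np.random.randn(128, 5)

# Save the three weight matrices under the keys the example expects...
np.savez('example_checkpoint.npz', w_in=w_in, w_h=w_h, w_out=w_out)

# ...and load them back the same way the example does.
weights = np.load('example_checkpoint.npz')
print(weights['w_in'].shape, weights['w_h'].shape, weights['w_out'].shape)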
Example #2
import matplotlib.patches as mpatch

import gym
import numpy as np
import os
import sys
sys.path.insert(0, '../../..')
import GP_Games

sys.path.insert(0, '../../../Genetic_Algorithms')
from GA_Config import Config
from GA_Network import Network

config = Config()
config.num_layers = 2
config.num_hidden = 128
config.env_name = 'GP_Water-v0'
config.a_size = 4

network = Network(config)

# Pre-allocate storage for the action taken at each step, generation, and iteration
generations = 100
steps = 100
iters = 1
actions = np.zeros([steps, generations, iters])

# Target and starting conditions for the GP_Water-v0 task
TargetTemp = 100
TargetMass = [0, 0.5, 0.5]
StartTemp = 21
StartMF = [0, 1, 0]

for gen in range(1, generations):
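The original example is cut off at this loop. Purely as a sketch of the kind of evaluation loop the pre-allocated actions array suggests, the snippet below steps an environment with random actions using the classic gym API the examples already rely on; CartPole-v1 stands in for the custom GP_Water-v0 environment, and the real code presumably queries the Network for actions instead.

import gym
import numpy as np

env = gym.make('CartPole-v1').unwrapped  # stand-in for the custom GP_Water-v0 environment

generations = 100
steps = 100
iters = 1
actions = np.zeros([steps, generations, iters])

for gen in range(1, generations):
    for it in range(iters):
        s = env.reset()
        for step in range(steps):
            a = env.action_space.sample()   # random action; the original presumably asks the Network
            actions[step, gen, it] = a      # record the action in the same layout as above
            s, r, done, info = env.step(a)
            if done:
                break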