Example #1
0
    def __init__(self, pos, dna=None, generation=0, cal=100.0, lim=200, food_ID=ID_PLANT, color=COLOR_BLUE):
        """Create a new being (a sprite/resource with a brain) at *pos*.

        Args:
            pos: world coordinates where the being is placed.
            dna: parent brain to inherit from; None means grow a random brain.
            generation: generation counter carried over from the parent.
            cal: starting calorie (energy) level.
            lim: calorie cap; also passed to Resource as the sprite's mass.
            food_ID: resource ID this being feeds on (ID_PLANT or other).
            color: sprite color.
        """
        pygame.sprite.Sprite.__init__(self, self.containers)
        Resource.__init__(self, pos, mass=lim, color=color)
        self.cal_limit = lim
        self.calories = cal                           # TODO get from genes
        # ID 4 when it eats plants, ID 5 otherwise (bool coerces to 0/1).
        self.ID = 4 + (food_ID != ID_PLANT)
        # print() call works on both Python 2 and Python 3 (was a Py2-only print statement).
        print("NEW BEING OF ID " + str(self.ID))
        self.food_ID = food_ID
        self.color = color
        # Attributes
        self.generation = generation
        self.velocity = random.randn(2)*0.2                                      # velocity vector 
        u = unitv(self.velocity)
        # Two antennae: the heading unit vector, scaled to 3 radii and
        # rotated +/-0.3 rad to either side.
        self.pa1 = rotate(u * self.radius*3,0.3)       # antennae 1
        self.pa2 = rotate(u * self.radius*3,-0.3)      # antennae 2
        self.hold = 0.0
        self.f_a = zeros(N_INPUTS, dtype=float)        # sensory-input buffer
        # DNA: with probability 0.9 copy the parent's brain; otherwise build
        # a fresh random one (i.e. a small chance of total mutation).
        # TODO learn OR select the reward-(summary) function
        if (dna is not None) and (random.rand() < 0.9):
            self.b = dna.copy_of()
        else:
            # NOTE(review): uses N_LINPUTS here vs N_INPUTS above — confirm intentional.
            self.b = make_brain(N_LINPUTS,
                                random.choice([-5, -1, 0, 0, 10, 15, 20, 25, 50]),
                                N_OUTPUTS,
                                f_desc="DE2",
                                use_bias=False,
                                density=clip(random.rand(), 0.1, 1.0),
                                tau=(1 + random.choice(100)))

        self.happiness = 0.
Example #2
0
from cerebro.functions import linear, sigmoid

# Select and build one of three baseline "brains", then plot the true error
# surface over the weight space and overlay the brain's current weights.
b = None
baseline = 1          # 1 = make_brain ("DE"), 2 = incremental RLS brain, else = Q-learning brain
f_h=linear            # hidden-layer activation for the baseline-1 brain

if baseline == 2:
    # PS: don't forget to change e to y when learn()ing with this brain!!
    from cerebro.brain import brain_incr
    from cerebro.RLS import RLS, F_sigmoid, F_linear
    from cerebro.MOP import MOPpf
    # D is assumed to be the input dimensionality, defined earlier in the
    # full script — TODO confirm.
    b = brain_incr(MOPpf(D,1,f=linear),RLS(D*1,1,ridge=10.1,f=linear))
elif baseline == 1:
    from cerebro.brain import make_brain
    # D inputs, 0 hidden units, 1 output, linear activations throughout.
    b = make_brain(D,0,1,f_h,f_h,density=0.5,tau=1,f_desc="DE")
else:
    from cerebro.brain_q import brain
    #from AC import AC
    from MOP import MOPpf
    # NOTE(review): QL is not imported in this branch — presumably from a
    # Q-learning module elsewhere in the project; verify before enabling.
    b = brain(MOPpf(2,1,f=linear),QL(2,1))

# Plot the true understanding of the world
z = plot_surface(ww1,ww2,true_error_density)
title("true error surface of weights")
ax0.contourf(ww1,ww2,z,50)
# Mark the true weight vector on the contour plot.
ax0.plot(w_true[0],w_true[1],'bx',markersize=5,linewidth=5)

#C1 = ax1.contourf(ww1,ww2,z,50)
# Line handles for later animation: current weights (dot) and their history (line).
L2, = ax1.plot(b.g.get_weight_vector(), 'go', markersize=5) # gol
L3, = ax1.plot(b.g.get_weight_vector(), 'r-', linewidth=2) # history
Example #3
0
def rwd(p1,p2):
    """Reward in [..,1]: 1 minus the Euclidean distance between the two
    points, normalized by the world diagonal DIAG."""
    dx = p1[0] - p2[0]
    dy = p1[1] - p2[1]
    return 1. - (sqrt(dx**2 + dy**2) / DIAG)

# WORLD STATE
# Random goal and agent positions; WIDTH is the world width defined earlier
# in the full script — TODO confirm scale.
gol = random.randn(2) * WIDTH/4
pos = random.randn(2) * WIDTH/4

# Initial reward
r = rwd(gol,pos)

# BRAIN STATE
from cerebro.brain import make_brain
from cerebro.functions import linear
#b = make_brain(2,0,2,f_desc='QL',f_o=linear)
# 2 inputs, 10 hidden units, 2 outputs; linear output activation.
b = make_brain(2,10,2,f_desc='DE2',f_o=linear,tau=10)   

# (Setup Animation)
T = 10000             # number of animation/simulation steps

# Trajectory buffer: one (x, y) row per step, seeded with the start position.
history = zeros((T,2))
history[0,:] = pos
fig = figure(figsize=(16.0, 10.0))
from matplotlib import gridspec
# Two stacked panels: a short top strip (weights/reward) over a tall main view.
gs = gridspec.GridSpec(2, 1, height_ratios=[1,4])
ax_1 = fig.add_subplot(gs[0])
ax_1.set_ylim([-10,10])
ax_1.set_title('weights and reward')
# Line handles updated during animation: Lk = reward marker, Lw = weight vector.
Lk, = ax_1.plot([0,0],[0,0], 'k-', linewidth=3) 
Lw, = ax_1.plot(range(len(b.g.wv)),zeros(len(b.g.wv)), 'mo-', linewidth=1) 
ax_2 = fig.add_subplot(gs[1])