Example #1
def buildNet_big_layer():
    substrate = NEAT.Substrate([(-1, -1), (-1, 1)],
                               [(0, i - (10 - 1) / 2) for i in range(10)],
                               [(1, 0), (1, 1)])
    substrate.m_hidden_nodes_activation = NEAT.ActivationFunction.TANH
    substrate.m_output_nodes_activation = NEAT.ActivationFunction.LINEAR
    return substrate
Example #2
def create_substrate(dim):
    """
    The function to create a two-sheet substrate configuration with the specified
    dimensions of each sheet.
    Arguments:
        dim:    The dimensions across the X and Y axes of the sheet
    """
    # Building sheet configurations of inputs and outputs
    inputs = create_sheet_space(-1, 1, dim, -1)
    outputs = create_sheet_space(-1, 1, dim, 0)

    substrate = NEAT.Substrate( inputs,
                                [], # hidden
                                outputs)

    substrate.m_allow_input_output_links = True

    substrate.m_allow_input_hidden_links = False
    substrate.m_allow_hidden_hidden_links = False
    substrate.m_allow_hidden_output_links = False
    substrate.m_allow_output_hidden_links = False
    substrate.m_allow_output_output_links = False
    substrate.m_allow_looped_hidden_links = False
    substrate.m_allow_looped_output_links = False

    substrate.m_hidden_nodes_activation = NEAT.ActivationFunction.SIGNED_SIGMOID
    substrate.m_output_nodes_activation = NEAT.ActivationFunction.UNSIGNED_SIGMOID

    substrate.m_with_distance = True
    substrate.m_max_weight_and_bias = 3.0

    return substrate
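The create_sheet_space helper used above is not shown in this example; a minimal sketch, assuming it lays out a dim x dim grid of node positions at a fixed depth z:

import numpy as np

def create_sheet_space(start, stop, dim, z):
    # Hypothetical helper: dim x dim node coordinates with x and y evenly
    # spaced between start and stop, all placed at the given depth z.
    lin_sp = np.linspace(start, stop, num=dim)
    return [(lin_sp[x], lin_sp[y], z) for x in range(dim) for y in range(dim)]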
Example #3
def buildNet_perceptron_mnist():
    substrate = NEAT.Substrate([(-1, x, y) for x in range(28)
                                for y in range(28)], [],
                               [(1, 0, x) for x in range(10)])
    substrate.m_allow_input_output_links = True
    substrate.m_hidden_nodes_activation = NEAT.ActivationFunction.TANH
    substrate.m_output_nodes_activation = NEAT.ActivationFunction.LINEAR
    return substrate
Example #4
def buildNet_big_layer_mnist():
    substrate = NEAT.Substrate([(-1, x, y) for x in range(28)
                                for y in range(28)],
                               [(0, x, y) for x in range(20)
                                for y in range(20)],
                               [(1, 0, x) for x in range(10)])
    substrate.m_hidden_nodes_activation = NEAT.ActivationFunction.TANH
    substrate.m_output_nodes_activation = NEAT.ActivationFunction.LINEAR
    return substrate
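A hedged sketch of how such a substrate is typically consumed for one MNIST digit; genome (a MultiNEAT CPPN genome) and image (a 28x28 NumPy array) are assumed to exist, and net.Input expects one value per substrate input, i.e. 784 here:

import numpy as np

net = NEAT.NeuralNetwork()
genome.BuildHyperNEATPhenotype(net, buildNet_big_layer_mnist())
net.Flush()
net.Input(image.reshape(-1).astype(float).tolist())  # 784 pixel intensities
net.Activate()
net.Activate()  # a second step lets activity cross the hidden layer
prediction = int(np.argmax(net.Output()))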
Example #5
def grid2d_substrate(inputs: int, hidden_layers: int, nodes_per_layer: list[int], outputs: int, leaky=True):
    input_nodes = get_2d_point_line(inputs, (-1.0, -1.0), (-1.0, 1.0))
    hidden_nodes = []
    layers = get_2d_point_line(hidden_layers + 2, (-1, 0), (1, 0))
    for i in range(hidden_layers):
        layer_x = layers[i + 1][0]
        hidden_nodes += get_2d_point_line(nodes_per_layer[i], (layer_x, -1.0), (layer_x, 1.0))
    output_nodes = get_2d_point_line(outputs, (1.0, -1.0), (1.0, 1.0))

    subst = neat.Substrate(input_nodes, hidden_nodes, output_nodes)
    subst.m_query_weights_only = True
    return subst
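get_2d_point_line is not defined in this snippet; a minimal sketch, assuming it returns n evenly spaced 2-D points from start to end, endpoints included:

def get_2d_point_line(n, start, end):
    # Hypothetical helper: n points spaced evenly along the segment start-end.
    if n == 1:
        return [start]
    return [(start[0] + i * (end[0] - start[0]) / (n - 1),
             start[1] + i * (end[1] - start[1]) / (n - 1)) for i in range(n)]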
Example #6
def create_substrate():
    """
    The function to create the substrate configuration with eight inputs, a bias node, and two outputs.
    """
    # The input layer
    x_space = np.linspace(-1.0, 1.0, num=4)
    inputs = [
        (x_space[0], 0.0, 1.0),
        (x_space[1], 0.0, 1.0),
        (x_space[0], 0.0, -1.0),
        (x_space[1], 0.0, -1.0),  # the left side
        (x_space[2], 0.0, 1.0),
        (x_space[3], 0.0, 1.0),
        (x_space[2], 0.0, -1.0),
        (x_space[3], 0.0, -1.0),  # the right side
        (0, 0, 0)  # the bias
    ]
    # The output layer
    outputs = [(-1.0, 1.0, 0.0), (1.0, 1.0, 0.0)]

    substrate = NEAT.Substrate(
        inputs,
        [],  # hidden
        outputs)

    # Allow connections: input-to-hidden, hidden-to-output, and hidden-to-hidden
    substrate.m_allow_input_hidden_links = True
    substrate.m_allow_hidden_output_links = True
    substrate.m_allow_hidden_hidden_links = True

    substrate.m_allow_input_output_links = False
    substrate.m_allow_output_hidden_links = False
    substrate.m_allow_output_output_links = False
    substrate.m_allow_looped_hidden_links = False
    substrate.m_allow_looped_output_links = False

    substrate.m_hidden_nodes_activation = NEAT.ActivationFunction.SIGNED_SIGMOID
    substrate.m_output_nodes_activation = NEAT.ActivationFunction.UNSIGNED_SIGMOID

    substrate.m_with_distance = True  # send connection length to the CPPN as a parameter
    substrate.m_max_weight_and_bias = 8.0

    return substrate
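A substrate like this is usually paired with a seed CPPN genome sized from the substrate itself; a sketch, assuming a NEAT.Parameters object and the 9-argument Genome constructor used elsewhere in these examples (newer MultiNEAT builds may expect an extra trailing argument):

params = NEAT.Parameters()
substrate = create_substrate()
genome = NEAT.Genome(0, substrate.GetMinCPPNInputs(), 0,
                     substrate.GetMinCPPNOutputs(), False,
                     NEAT.ActivationFunction.SIGNED_SIGMOID,
                     NEAT.ActivationFunction.SIGNED_SIGMOID, 0, params)
net = NEAT.NeuralNetwork()
genome.BuildHyperNEATPhenotype(net, substrate)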
Example #7
#!/usr/bin/env python3
import gym

import sys

import MultiNEAT as NEAT
import MultiNEAT.viz as viz
import random as rnd
import pickle
import numpy as np
from tqdm import tqdm
import cv2

substrate = NEAT.Substrate([(-1, -1), (-1, 0), (-1, 1)],
                           [(0, -1), (0, 0), (0, 1)],
                           [(1, 0)])

substrate.m_allow_input_hidden_links = True
substrate.m_allow_input_output_links = False
substrate.m_allow_hidden_hidden_links = False
substrate.m_allow_hidden_output_links = True
substrate.m_allow_output_hidden_links = False
substrate.m_allow_output_output_links = False
substrate.m_allow_looped_hidden_links = False
substrate.m_allow_looped_output_links = False
Example #8
    - Horizontal displacement
'''
params = NEAT.Parameters()
params.PopulationSize = 20
params.DynamicCompatibility = True

genome = NEAT.Genome(0, 310, 30, 2, False,
                     NEAT.ActivationFunction.UNSIGNED_SIGMOID,
                     NEAT.ActivationFunction.UNSIGNED_SIGMOID, 0, params)

pop = NEAT.Population(genome, params, True, 1.0, 0)
# width and height (the board dimensions) are assumed to be defined earlier in the original script
substrate = [((i / width) / height, (i % width) / width, 0.0)
             for i in range(width * height)]
piecearray = [(i / 25, (i % 5) / 5, 1.0) for i in range(25)]
substrate.extend(piecearray * 4)
substrate = NEAT.Substrate(substrate, [], [((i - 7) / 7, ) for i in range(14)])


def evaluate(genome):
    # create a neural network for the genome
    net = NEAT.NeuralNetwork()
    genome.BuildESHyperNEATPhenotype(net, substrate, params)

    # create a board
    board = Board(width, height)
    canAddPiece = 1
    current = 0
    line = 0
    maxPieces = 300

    # while the game is not over
Example #9
params.Width = 1.
params.Height = 1.
params.Elitism = 0.1
params.CrossoverRate = 0.5
params.MutateWeightsSevereProb = 0.01

params.MutateNeuronTraitsProb = 0
params.MutateLinkTraitsProb = 0

rng = NEAT.RNG()
rng.TimeSeed()

substrate = NEAT.Substrate([
    (-1., -1., 0.0),
    (-.5, -1., 0.0),
    (0.0, -1., 0.0),
    (.5, -1., 0.0),
    (1.0, -1., 0.0),
    (0.0, -1.0, -1.0),
], [], [(-1., 1., 0.0), (1.0, 1.0, 0.0)])

substrate.m_allow_input_hidden_links = False
substrate.m_allow_input_output_links = False
substrate.m_allow_hidden_hidden_links = False
substrate.m_allow_hidden_output_links = False
substrate.m_allow_output_hidden_links = False
substrate.m_allow_output_output_links = False
substrate.m_allow_looped_hidden_links = False
substrate.m_allow_looped_output_links = False

# let's set the activation functions
substrate.m_hidden_nodes_activation = NEAT.ActivationFunction.SIGNED_SIGMOID
Example #10
params.BandThreshold = 0.3
params.InitialDepth = 3
params.MaxDepth = 3
params.IterationLevel = 1
params.Leo = True
params.LeoSeed = False
params.LeoThreshold = 0.3
params.CPPN_Bias = 1.0
params.Qtree_X = 0.0
params.Qtree_Y = 0.0
params.Width = 2.0

rng = NEAT.RNG()
rng.TimeSeed()

substrate = NEAT.Substrate([(-1., -1., 0.0), (1., -1., 0.0), (0., -1., 0.0)],
                           [], [(0., 1., 0.0)])

substrate.m_allow_input_hidden_links = False
substrate.m_allow_input_output_links = False
substrate.m_allow_hidden_hidden_links = False
substrate.m_allow_hidden_output_links = False
substrate.m_allow_output_hidden_links = False
substrate.m_allow_output_output_links = False
substrate.m_allow_looped_hidden_links = False
substrate.m_allow_looped_output_links = False

# let's set the activation functions
substrate.m_hidden_nodes_activation = NEAT.ActivationFunction.SIGNED_SIGMOID
substrate.m_output_nodes_activation = NEAT.ActivationFunction.SIGNED_SIGMOID

# when to output a link and max weight
Example #11
# the coordinates below extend an input list 'meh' that is assumed to be
# started earlier in the original script
meh.append((0, 0.5, -1))
meh.append((-0.5, 0.5, -1))
meh.append((0.5, 0.5, -1))
meh.append((0, -1, -1))
print(meh)
substrate = NEAT.Substrate(meh,
                           [(0., 0, 1.), (-1., 0, 1.0), (1., 0, 1.0)],
                           [(-1.9, 1., 1.), (1.9, 1., 1.)])

substrate.m_allow_input_hidden_links = True
substrate.m_allow_input_output_links = True
substrate.m_allow_hidden_hidden_links = True
substrate.m_allow_hidden_output_links = True
substrate.m_allow_output_hidden_links = True
substrate.m_allow_output_output_links = True
substrate.m_allow_looped_hidden_links = True
substrate.m_allow_looped_output_links = True
Example #12
    cm = ax.contourf(pattern,
                     200,
                     cmap='autumn',
                     origin='lower',
                     extent=[-1, 1, -1, 1])
    #for point in points:
    #    ax.add_patch(plt.Circle((point[0], point[1]), 0.04, fc='red'))
    plt.show()

    return


################################################################

substrate = NEAT.Substrate([(-1, -1, -1.0), (-0.66, -1.0, -1.0),
                            (0.66, -1.0, 1.0), (1., -1.0, 1.0), (0., -1., 0.)],
                           [], [(-0.5, 1, 0.), (0.5, 1, 0.0)])
'''
substrate = NEAT.Substrate([(-1., -1., 0.0), (-1., 1., 0.0), (-1., 0., 0.0)],
                           [],
                           [(1., 0., 0.0)])'''


def Scale(a, a_min, a_max, a_tr_min, a_tr_max):
    t_a_r = a_max - a_min
    if t_a_r == 0:
        return a_max

    t_r = a_tr_max - a_tr_min
    rel_a = (a - a_min) / t_a_r
    return a_tr_min + t_r * rel_a
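Scale linearly remaps a value from the range [a_min, a_max] to [a_tr_min, a_tr_max]; for example:

Scale(0.5, 0.0, 1.0, -1.0, 1.0)   # -> 0.0
Scale(1.0, 0.0, 1.0, -1.0, 1.0)   # -> 1.0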
Example #13
import sys

sys.path.append("/home/peter")
sys.path.append("/home/peter/Desktop")
sys.path.append("/home/peter/Desktop/projects")
import time
import random as rnd
import commands as comm
import cv2
import numpy as np
import cPickle as pickle
import MultiNEAT as NEAT
import multiprocessing as mpc

# the simple 3D substrate with 3 input points, 2 hidden and 1 output for XOR
substrate = NEAT.Substrate([(-1, 1, 0), (1, 0, 0), (0, 1, 0)],
                           [(0.5, 0.5, 0.5), (-0.5, 1.5, 0.5)], [(0, 0, 1)])

# let's configure it a bit to avoid recurrence in the substrate
substrate.m_allow_hidden_hidden_links = False
substrate.m_allow_hidden_output_links = False
substrate.m_allow_looped_hidden_links = False
substrate.m_allow_looped_output_links = False

# let's set the activation functions
substrate.m_hidden_nodes_activation = NEAT.ActivationFunction.UNSIGNED_SIGMOID
substrate.m_output_nodes_activation = NEAT.ActivationFunction.UNSIGNED_SIGMOID

# when to output a link and max weight
substrate.m_link_threshold = 0.2
substrate.m_max_weight = 5.0
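A hedged sketch of how this XOR substrate is typically evaluated; the genome would come from a MultiNEAT population, and the number of activation steps and the fitness formula are assumptions:

def evaluate_xor(genome):
    net = NEAT.NeuralNetwork()
    genome.BuildHyperNEATPhenotype(net, substrate)
    error = 0.0
    for a, b in ((0, 0), (0, 1), (1, 0), (1, 1)):
        net.Flush()
        net.Input([a, b, 1.0])   # two XOR inputs plus a constant bias input
        for _ in range(3):       # several steps so activity reaches the output
            net.Activate()
        error += abs(net.Output()[0] - (a ^ b))
    return (4.0 - error) ** 2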
Example #14
def buildNet_perceptron():
    substrate = NEAT.Substrate([(-1, -1), (-1, 1)], [], [(1, 0), (1, 1)])
    substrate.m_allow_input_output_links = True
    substrate.m_hidden_nodes_activation = NEAT.ActivationFunction.TANH
    substrate.m_output_nodes_activation = NEAT.ActivationFunction.LINEAR
    return substrate