import sys

import numpy
import theano

from NN.NN import NNet
from NN.Layers.FullyConnected import FullyConnectedLayer
from NN.Layers.Softmax import SoftmaxLayer
from NN.MNISTLoader import load_data

# CONFIG
# Theano only honours the device setting before it is first imported (via
# THEANO_FLAGS or .theanorc); assigning it here can raise, so fail softly.
try:
    theano.config.device = 'gpu'
except Exception:
    pass
theano.config.floatX = 'float32'
numpy.random.seed(0)


def evaluateBest(ind):
    # Build a network from an individual's layer specs and train it.
    # `ind[0]` is a list of layer specifications, `ind[1]` a dict of
    # hyperparameters. NOTE: `params` and `defineLayer` are assumed to be
    # provided by the calling module (e.g. the GA driver); they are not
    # defined in this file.
    try:
        layers = ind[0]
        par = ind[1]
        dfns = []
        for layer in layers:
            # The first layer sees the raw input shape; each later layer
            # sees the output shape of the previous one.
            if not dfns:
                inpt = params["inputShape"]
            else:
                inpt = dfns[-1].get_output_shape()
            dfn = defineLayer(inpt, layer)
            if dfn is not None:
                dfns.append(dfn)
        net = NNet(dfns, params["batchSize"])

        fitness = net.train(training_data, 100, params["batchSize"], par["learningRate"],
                            validation_data, test_data, lmbda=par["l2"])
        print("Individual fitness " + str(numpy.average(fitness[1])))
        print(fitness)
        return fitness
    except Exception as e:
        print("error in evaluation")
        print(e)
        return [[-1], [-1]]

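# A minimal sketch of the individual layout, inferred from the accessors in
# evaluateBest above; the layer-spec format is whatever defineLayer() expects,
# and the values shown are hypothetical:
#   ind = ([spec0, spec1], {"learningRate": 0.1, "l2": 0.0001})
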
# HYPER PARAMETERS
mini_batch_size = 100
epochs = 100
learning_rate = 1

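# 784 inputs (28x28 MNIST pixels) -> 30 hidden units -> 10-way softmax over digits.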
net = NNet([FullyConnectedLayer(n_in=784, n_out=30),
            FullyConnectedLayer(n_in=30, n_out=10),
            SoftmaxLayer(n_in=10, n_out=10)],
           mini_batch_size)

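# load_data() yields the three MNIST splits (presumably the standard
# 50k train / 10k validation / 10k test partition).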
training_data, validation_data, test_data = load_data()

net.train(training_data, epochs, mini_batch_size, learning_rate,
          validation_data, test_data)
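
# net.train() returns an indexable structure whose second element is
# array-like; evaluateBest above averages fitness[1] as an individual's score.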