def train(self, transitionSamples):

        print "Entrenando..."

        k = 0
        trainer = RPropMinusTrainer(self.Q, batchlearning=True)
        #trainer = BackpropTrainer(self.Q, batchlearning=False)
        TS = SupervisedDataSet(4, 1)

        while (k < self._epochs):

            if k % 10 == 0:
                print "\t ", k

            # Build the training set from the samples
            # Input: 4-dimensional vector (angle, angular velocity, position, action)
            # Target: value

            TS.clear()

            for s, a, s_1, costo in transitionSamples:

                # Evaluate Q at s', for every possible action
                # (a vector with the value of s' for each of the 3 possible actions)
                # Q_s1 = [ self.Q.activate([s_1.angulo, s_1.velocidadAngular, s_1.posicion, b]) for b in range(Accion.maxValor + 1) ]
                valDerecha = self.Q.activate([
                    s_1.angulo, s_1.velocidadAngular, s_1.posicion,
                    Accion.DERECHA
                ])
                valIzquierda = self.Q.activate([
                    s_1.angulo, s_1.velocidadAngular, s_1.posicion,
                    Accion.IZQUIERDA
                ])

                if valDerecha >= 1 or valDerecha <= 0:
                    print "Q incorrecta: ", valDerecha

                if valIzquierda >= 1 or valIzquierda <= 0:
                    print "Q incorrecta: ", valIzquierda

                # Input and target for the neural network
                inputVal = (s.angulo, s.velocidadAngular, s.posicion, a)

                if costo == 0:
                    targetVal = costo
                else:
                    targetVal = costo + self._gamma * min(
                        valDerecha, valIzquierda)

                if targetVal > 1 or targetVal < 0:
                    print "Target incorrecto: ", targetVal

                TS.addSample(inputVal, targetVal)

            # Train the neural network
            trainer.setData(TS)
            trainer.train()  # 1 epoch
            #trainer.trainEpochs(self._epochsNN)

            k = k + 1
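
# The [0, 1] range checks above suggest that self.Q is a 4-input network with a
# sigmoid output and that Accion encodes the discrete actions. A minimal sketch
# of those assumed surrounding definitions (names and sizes are hypothetical,
# not the original project's code):
from pybrain.tools.shortcuts import buildNetwork
from pybrain.structure import SigmoidLayer

class Accion:
    IZQUIERDA = 0
    DERECHA = 1
    maxValor = 1

# 4 inputs (angle, angular velocity, position, action), 1 output kept in [0, 1]
Q = buildNetwork(4, 10, 1, bias=True, outclass=SigmoidLayer)
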
Example #2
class ANNApproximator(object):
    def __init__(self, alpha):
        self.name = "ANNApprox"
        self.network = FeedForwardNetwork()
        inLayer = LinearLayer(4)
        hiddenLayer = SigmoidLayer(12)
        outLayer = LinearLayer(1)
        self.network.addInputModule(inLayer)
        self.network.addModule(hiddenLayer)
        self.network.addOutputModule(outLayer)
        in_to_hidden = FullConnection(inLayer, hiddenLayer)
        hidden_to_out = FullConnection(hiddenLayer, outLayer)
        self.network.addConnection(in_to_hidden)
        self.network.addConnection(hidden_to_out)

        # Last step to make sure everything works in the connections
        self.network.sortModules()

        self.dataset = SupervisedDataSet(4, 1)
        self.trainer = BackpropTrainer(self.network,
                                       self.dataset,
                                       learningrate=alpha,
                                       momentum=0.0,
                                       verbose=True)

    def computeOutput(self, state_features):
        return self.network.activate(state_features)[0]

    def updateWeights(self, features, desired_output):
        print("updateWeights: features: {0}".format(features))
        print("updateWeights: value: {0}".format(desired_output))
        self.dataset.addSample(features, desired_output)
        # self.trainer.train()
        self.trainer.trainEpochs(10)
        self.dataset.clear()
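
# Usage sketch for ANNApproximator above; the feature values are made up and
# only illustrate the call sequence (one prediction, one supervised update).
approx = ANNApproximator(alpha=0.1)
features = [0.1, -0.3, 0.5, 0.0]            # 4 state features, as in the constructor
print(approx.computeOutput(features))       # prediction before training
approx.updateWeights(features, 0.7)         # regress towards a target of 0.7
print(approx.computeOutput(features))       # prediction after 10 backprop epochs
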
Example #3
def neural_network(data, target, network):
    DS = SupervisedDataSet(len(data[0]), 1)
    nn = buildNetwork(len(data[0]), 7, 1, bias = True)
    kf = KFold(len(target), 10, shuffle=True)
    RMSE_NN = []
    for train_index, test_index in kf:
        data_train, data_test = data[train_index], data[test_index]
        target_train, target_test = target[train_index], target[test_index]
        for d,t in zip(data_train, target_train):
            DS.addSample(d, t)
        bpTrain = BackpropTrainer(nn,DS, verbose = True)
        #bpTrain.train()
        bpTrain.trainUntilConvergence(maxEpochs = 10)
        p = []
        for d_test in data_test:
            # activate() returns a length-1 array; keep the scalar so the
            # residuals below have shape (n_test,) rather than (n_test, n_test)
            p.append(nn.activate(d_test)[0])

        rmse_nn = sqrt(np.mean((np.array(p) - target_test)**2))
        RMSE_NN.append(rmse_nn)
        DS.clear()
    time = range(1,11)
    plt.figure()
    plt.plot(time, RMSE_NN)
    plt.xlabel('cross-validation time')
    plt.ylabel('RMSE')
    plt.show()
    print(np.mean(RMSE_NN))
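
# Note: KFold(len(target), 10, shuffle=True) is the pre-0.18 scikit-learn API
# (sklearn.cross_validation). On current scikit-learn the equivalent split
# (an assumption about which version is installed) looks like this; data and
# target are the same arrays passed to neural_network() above.
from sklearn.model_selection import KFold

kf = KFold(n_splits=10, shuffle=True)
for train_index, test_index in kf.split(data):
    data_train, data_test = data[train_index], data[test_index]
    target_train, target_test = target[train_index], target[test_index]
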
Example #4
File: nn.py  Project: mistler/nn
def train(nn, data, N, predictionLength, iterations, validationSize):
    loss = 0.
    lossSize = 1.
    for n in range(iterations):
        dataSet = SupervisedDataSet(5 * N, 1)
        start = 1. * (len(data) - validationSize - 1 - N - predictionLength) / iterations * n
        end = 1. * (len(data) - validationSize - 1 - N - predictionLength) / iterations * (n + 1) - validationSize
        validation = end + validationSize
        start = int(start)
        end = int(end)
        validation = int(validation)
        for i in range(start, end):
            sample, mainValue = data.contiguousArray(i, i + N)
            output = data.normalizedMax(i + N + 1, i + N + predictionLength + 1, mainValue)
            dataSet.addSample(sample, (output,))
        print "iteration: ", n, " start: ", start, " end: ", end
        trainer = BackpropTrainer(nn, dataSet)
        trainer.train()
        dataSet.clear()
        for i in range(end, validation):
            sample, mainValue = data.contiguousArray(i, i + N)
            realOutput = data.max(i + N + 1, i + N + predictionLength + 1)
            nnOutputValue = nn.activate(sample)[0] + mainValue
            dt = data.date(i + N + 1)
            currentLoss = nnOutputValue - realOutput
            loss += currentLoss * currentLoss
            print '============================'
            print dt
            print "NN: ", "{0:.10f}".format(nnOutputValue), " Real: ", "{0:.10f}".format(realOutput)
            print "LOSS: ", "{0:.10f}".format(currentLoss)
            print "LOSS TOTAL: ", "{0:.10f}".format(sqrt(loss / lossSize))
            print '============================'
            lossSize += 1.
class bpNetController(object):
	def __init__(self, *args):
		self.debug = False
		self.setup(*args)

	def setup(self, depth = 4, refLen =5):
		self.inCnt = refLen + 1
		self.net = buildNetwork(self.inCnt, depth, 1, bias=True, hiddenclass=TanhLayer)
		self.ds = SupervisedDataSet(self.inCnt, 1)
		self.trainer = BackpropTrainer(self.net, self.ds)
		self.clear()

	def enableDebug(self):
		self.debug = True

	def sample(self, refs, inp, expectedOut):
		if self.debug: print "added {}".format([refs, inp, expectedOut])
		self.ds.addSample(refs+[inp], expectedOut)

	def train(self, epochs = 100):
		self.trainer.trainEpochs(epochs)

	def clear(self):
		self.ds.clear()

	def act(self, refs, inp):
		return self.net.activate(refs+[inp])

	@property
	def curEpoch(self):
		return self.trainer.epoch
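
# Usage sketch for bpNetController: feed it (reference history, input, target)
# samples and query the trained net. The synthetic data below only illustrates
# the call sequence; depth/refLen match the defaults documented in setup().
import random

ctrl = bpNetController(4, 5)                 # depth=4 hidden units, refLen=5
for _ in range(200):
    refs = [random.random() for _ in range(5)]
    inp = random.random()
    ctrl.sample(refs, inp, [inp])            # target: reproduce the current input
ctrl.train(epochs=50)
print(ctrl.act([0.1, 0.2, 0.3, 0.4, 0.5], 0.6))
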
Example #6
File: nn.py  Project: mistler/nn
def trainUntilConvergence(nn, data, N, predictionLength):
    dataSet = SupervisedDataSet(5 * N, 1)
    start = 0
    end = len(data) + 1 - N - predictionLength
    for i in range(start, end):
        sample, mainValue = data.contiguousArray(i, i + N)
        output = data.normalizedMax(i + N + 1, i + N + predictionLength + 1, mainValue)
        dataSet.addSample(sample, (output,))
    trainer = BackpropTrainer(nn, dataSet)
    trainer.trainUntilConvergence()
    dataSet.clear()
Example #8
    def updateNN(self, state, action, reward, state_new):
        # learning target
        if reward == REWARD_WIN or reward == REWARD_LOSS:  # terminal states
            yi = reward
        else:  # transition states
            yi = reward + GAMMA * max(self.nn.activate(state_new))
        dataSet = SupervisedDataSet(NODE_INPUT, NODE_OUTPUT)
        learn_target = self.nn.activate(state)
        learn_target[action] = yi
        dataSet.addSample(state, learn_target)

        trainer = BackpropTrainer(self.nn, dataSet)
        trainer.train()

        dataSet.clear()
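
# updateNN() above implements the usual one-step Q-learning target,
#     yi = reward                                      for terminal transitions,
#     yi = reward + GAMMA * max_a' Q(state_new, a')    otherwise,
# and regresses the network output for the chosen action towards yi.
# A minimal sketch of the surrounding definitions it assumes; the sizes and
# reward constants here are hypothetical, only the shapes matter.
from pybrain.tools.shortcuts import buildNetwork

NODE_INPUT = 9        # e.g. one feature per board cell
NODE_OUTPUT = 9       # one Q-value per action
GAMMA = 0.9
REWARD_WIN, REWARD_LOSS = 1.0, -1.0

nn = buildNetwork(NODE_INPUT, 20, NODE_OUTPUT, bias=True)
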
class StateNetwork():
	'''
	Stores the state-transition function: given an input, it returns the state
	at the next time step. The next state does not include the action.
	'''
	def __init__(self, name='deep_state', inputNum=192, hidden1Num=192, hidden2Num=192, hidden3Num=192, outNum=144):
		self.net = buildNetwork(inputNum, hidden1Num, hidden2Num, hidden3Num, outNum)
		self.ds = SupervisedDataSet(inputNum, outNum)
		self.name = name
		self.turn = 0

	def train(self, input, output):
		self.ds.clear()
		self.ds.addSample(input, output)
		trainer = BackpropTrainer(self.net, self.ds)
		trainer.train()


	def saveNet(self):
		if not os.path.isdir(self.name):
			os.mkdir(self.name)
		print self.name  + '/' + str(self.turn), ' has saved'
		with open(self.name  + '/' + str(self.turn), 'w') as f:
			pickle.dump(self.net, f)

	def loadNet(self, turn=0):
		print 'loading ', self.name  + '/' + str(turn)
		time.sleep(1)
		if os.path.isfile(self.name  + '/' + str(turn)):
			with open(self.name  + '/' + str(turn), 'r') as f:
				self.net = pickle.load(f)

	def getValue(self, input):
		output = self.net.activate(input)
		for i,v in enumerate(output):
			if v > 0.5:
				output[i] = 1
			else:
				output[i] = 0
		return output

	def getInput(self, state, action, type=1):
		return RunFastAgent.getInput(state, action, type=type)

	def getOutput(self, state):
		input = RunFastAgent.getInput(state, [])
		return input[:144]
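
# Usage sketch for StateNetwork: one online update on a single
# (state+action, next-state) pair and a thresholded prediction. The zero
# vectors are placeholders; in the project they come from RunFastAgent.getInput().
sn = StateNetwork()
x = [0.0] * 192
y = [0.0] * 144
sn.train(x, y)                 # clears the dataset, adds the sample, one backprop step
prediction = sn.getValue(x)    # each component clamped to 0 or 1
sn.saveNet()                   # pickles the net to '<name>/<turn>' on disk
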
class RunFastNetwork():
	'''
	Stores the Q-values of an agent in the RunFast game.
	'''
	def __init__(self, name='', inputNum=192, hiddenNum=192, outNum=1):
		self.net = buildNetwork(inputNum, hiddenNum, outNum)
		self.ds = SupervisedDataSet(inputNum, outNum)
		self.name = name
		self.turn = 0

	def train(self, input, output):
		self.ds.clear()
		self.ds.addSample(input, output)
		trainer = BackpropTrainer(self.net, self.ds)
		trainer.train()

	def addLearner(self, learner):
		self.learner = learner

	def saveNet(self, filename=''):
		with open(self.name  + '/' + str(self.turn), 'w') as f:
			print self.name  + '/' + str(self.turn), ' has saved'
			pickle.dump(self, f)

	def loadNet(self, playName, turn=0):
		if os.path.isfile(playName  + '/' + str(turn)):
			with open(self.name  + '/' + str(turn), 'r') as f:
				print 'loading ', playName  + '/' + str(turn)
				time.sleep(0.5)
				obj = pickle.load(f)
				print obj.turn
				self.turn = obj.turn
				self.net = obj.net
				self.name = obj.name

	def getValue(self, input):
		return self.net.activate(input)
class Agent(object):
    def __init__(self, use_brain):
        self.price_belief_high = random.uniform(PRICE_LOW, PRICE_HIGH)
        self.price_belief_low = random.uniform(PRICE_LOW, self.price_belief_high)
        self.price = random.uniform(self.price_belief_low, self.price_belief_high)
        self.consumption_value_low = random.randint(15, 60) #Kilowatts used per day
        self.consumption_value_high = random.randint(self.consumption_value_low, 60)
        self.production_value = random.randint(2, 15) #Square Meters of Solar Panels

        self.no_trades = 0
        self.wealth = 0
        self.supply = 0
        self.demand = 0
        self.weather = 1.0
        self.power = 0.0
        self.reserve_power = 0.0
        self.observed_prices = [] #Prices at which the agent successfully traded.
        
        self.use_brain = use_brain

        self.price_history = []
        self.wealth_history = []
        
        if use_brain:
            self.brain = buildNetwork(3, 40, 1)
            self.memory = SupervisedDataSet(3, 1)
            self.trainer = BackpropTrainer(self.brain)

    def sell(self, units, price):
        self.observed_prices.append(price)
        self.power -= units
        self.no_trades += 1
        self.wealth += (units * price)

    def buy(self, units, price):
        self.observed_prices.append(price)
        self.power += units
        self.no_trades += 1
        self.wealth -= (units * price)

    def day_begin(self, weather, market):
        self.price_history.append(self.price)
        self.wealth_history.append(self.wealth)
        
        self.weather = weather
        self.consumption_value = random.randint(self.consumption_value_low, self.consumption_value_high)
        self.power = ((self.production_value * self.weather) - self.consumption_value)

        #Use any reserve power if we have it.
        if self.reserve_power > 0:
            self.power += self.reserve_power
            self.reserve_power = 0

        #Update Supply and Demand unless "Smart Agent"
        if not self.use_brain or self.power <= 0 or len(market.price_history) < 3:
            self.update_supply_demand(market)
            return

        #Predict price
        buyers = [agent for agent in market.agents if agent.demand > 0]
        sellers = [agent for agent in market.agents if agent.supply > 0]

        supply = sum(seller.supply for seller in sellers)
        demand = sum(buyer.demand for buyer in buyers)
        weather = self.weather

        predicted_price = self.brain.activate((weather, supply, demand))[0]
        
        #Store power instead of selling it if price is going to be low.        
        threshold = statistics.median(market.price_history) #(PRICE_LOW + PRICE_HIGH) * 0.5
        if predicted_price < threshold:
            self.reserve_power += self.power
            self.power = 0
        self.update_supply_demand(market)

    def day_end(self, market):
        
        if not self.use_brain:
            return
        
        supply = market.asks[-1]
        demand = market.bids[-1]
        weather = self.weather
        price = market.price_history[-1]
        
        self.price_belief_low = self.brain.activate((weather, supply, demand))[0]
        self.price_belief_high = self.brain.activate((weather, supply, demand))[0]
        self.price = random.uniform(self.price_belief_low, self.price_belief_high)
        self.price_history[-1] = self.price
        
               
        self.memory.clear()
        self.memory.addSample((weather, supply, demand), (price,))
        self.trainer.trainOnDataset(self.memory)
        
    def update_price_belief(self, market, did_sell, success):
        public_mean_price = market.average_price()
        mean = (self.price_belief_low + self.price_belief_high) / 2
        confidence_sigma = 0.05
        
        delta_mean = mean - public_mean_price
        
        if success:
                        
            #If overpaid or undersold, shift towards mean
            if not did_sell and delta_mean > SIGNIFICANT:
                self.price_belief_low -= delta_mean / 2
                self.price_belief_high -= delta_mean / 2
            elif did_sell and delta_mean < -SIGNIFICANT:
                self.price_belief_low -= delta_mean / 2
                self.price_belief_high -= delta_mean / 2
                
                
            #increase confidence in price
            self.price_belief_low += confidence_sigma * mean
            self.price_belief_high -= confidence_sigma * mean
            
        else:
            
            #Shift belief towards means
            self.price_belief_low -= delta_mean / 2
            self.price_belief_high -= delta_mean / 2
            
                
            #Need lots of power? Buy for higher price
            if (not did_sell and self.demand > self.production_value * 2):
                confidence_sigma *= 2
            #Lots of power to sell? sell for lower price
            elif(did_sell and self.supply > self.consumption_value * 2):
                confidence_sigma *= 2
            #Otherwise, check supply/demand
            else:
                asks = sum(market.asks) / len(market.asks)
                bids = sum(market.bids) / len(market.bids)
                supply_vs_demand = (asks - bids) / (asks + bids)
                if supply_vs_demand > SIG_IMBALANCE or supply_vs_demand < -SIG_IMBALANCE:
                    new_mean = public_mean_price * (1-supply_vs_demand)
                    delta_to_mean = mean - new_mean
                    self.price_belief_high -= delta_to_mean / 2
                    self.price_belief_low -= delta_to_mean / 2

            #decrease confidence in price
            self.price_belief_low -= confidence_sigma * mean
            self.price_belief_high += confidence_sigma * mean
            

        self.price = random.uniform(self.price_belief_low, self.price_belief_high)

    def update_supply_demand(self, market):
        self.demand = 0
        self.supply = 0
        if self.power > 0:
            if len(self.observed_prices) < 3:
                self.supply = self.power
            else:
                self.supply = round(self.power * self.price_favorability(market.average_price()))
                if self.supply < 1:
                    self.supply = 0
            self.reserve_power = self.power - self.supply
        elif self.power < 0:
            if len(self.observed_prices) < 3:
                self.demand = self.power * -1
            else:
                f = (1 - self.price_favorability(market.average_price()))
                self.demand = round(self.power * f) * -1
                if self.demand < 0:
                    self.demand = 0

    def price_favorability(self, value):
        
        max_n = max(self.observed_prices)
        min_n = min(self.observed_prices)
        value -= min_n
        max_n -= min_n
        min_n = 0
        value = value / max_n
        if value < 0:
            value = 0
        if value > 1:
            value = 1
        
        return value
    
    @property
    def price_belief_high(self):
        return self._price_belief_high

    @property
    def price_belief_low(self):
        return self._price_belief_low
    
    @price_belief_high.setter
    def price_belief_high(self, value):
        if value < PRICE_LOW:
            value = PRICE_LOW
        elif value > PRICE_HIGH:
            value = PRICE_HIGH
        self._price_belief_high = value

    @price_belief_low.setter
    def price_belief_low(self, value):
        if value < PRICE_LOW:
            value = PRICE_LOW
        elif value > PRICE_HIGH:
            value = PRICE_HIGH
        self._price_belief_low = value
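
# Sketch of the learning step the "smart" agent performs in day_end(): a 3-input
# network maps (weather, supply, demand) to a price and is refit on the latest
# observed clearing price. The numbers below are illustrative only.
from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

brain = buildNetwork(3, 40, 1)
memory = SupervisedDataSet(3, 1)
trainer = BackpropTrainer(brain)

weather, supply, demand, clearing_price = 0.8, 120.0, 95.0, 0.35
memory.addSample((weather, supply, demand), (clearing_price,))
trainer.trainOnDataset(memory)
print(brain.activate((weather, supply, demand))[0])
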
Example #13
def train_network(net,
                  best_fraction,
                  trainer=default_trainer,
                  transit=default_transit):
    """
    Author: Xander
    This function performs the common grunt-work of 
    both build_network() and improve_network().
    """
    print "Building dataset..."
    ds = SupervisedDataSet(
        2 * transit.generate_stars * transit.generate_points,
        transit.generate_stars)
    for i in xrange(trainer.interval_count):
        print "Generating exoplanet transits..."
        ds.clear()
        for k in xrange(trainer.data_size):
            inpt, output = generate(transit=transit)
            ds.addSample(inpt, output)
        print "Building trainer..."
        network_trainer = BackpropTrainer(net, ds)
        print "Training..."
        for j in xrange(trainer.interval_size):
            msg = "Iteration"
            msg += " " * (
                len(str(trainer.interval_count * trainer.interval_size)) -
                len(str(trainer.interval_size * i + j + 1)) + 1)
            msg += str(trainer.interval_size * i + j + 1)
            msg += " of " + str(trainer.interval_count * trainer.interval_size)
            msg += ": error = "
            msg += str(network_trainer.train())
            print msg
        if i != trainer.interval_count - 1:
            print "Creating interval report..."
            report = message(net,
                             trainer.interval_check,
                             trainer=trainer,
                             transit=transit)
            print report[0][:-1]
            if report[1] > best_fraction:
                best_fraction = report[1]
                print "This interval was helpful and will be saved."
                print "Saving..."
                NetworkWriter.writeToFile(net, "../network.xml")
                print "Writing info..."
                f = open("../network_info.txt", "w")
                for line in report[0]:
                    f.write(line)
                f.close()
            else:
                print "This interval was not helpful and will be discarded."
                print "Retreiving older version..."
                net = NetworkReader.readFrom("../network.xml")
    print "Creating program report..."
    report = message(net, trainer.check_size, trainer=trainer, transit=transit)
    print report[0][:-1]
    if report[1] > best_fraction:
        best_fraction = report[1]
        print "This interval was helpful and will be saved."
        print "Saving..."
        NetworkWriter.writeToFile(net, "../network.xml")
        print "Writing info..."
        f = open("../network_info.txt", "w")
        for line in report[0]:
            f.write(line)
        f.close()
    else:
        print "This interval was not helpful and will be discarded."
        print "Retreiving older version..."
        net = NetworkReader.readFrom("../network.xml")
        print "Improving older report..."
        better_report = message(net=net,
                                size=trainer.check_size,
                                trainer=trainer,
                                transit=transit)
        print "Writing info..."
        f = open("../network_info.txt", "w")
        for line in better_report[0]:
            f.write(line)
        f.close()
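
# train_network() keeps or reverts each training interval by round-tripping the
# network through PyBrain's XML serializer. A minimal sketch of that save/revert
# pattern, using the same classes; the path is illustrative.
from pybrain.tools.shortcuts import buildNetwork
from pybrain.tools.customxml import networkwriter, networkreader

net = buildNetwork(10, 5, 1)
networkwriter.NetworkWriter.writeToFile(net, "checkpoint.xml")   # keep current weights
# ... train further; if the new evaluation is worse, roll back:
net = networkreader.NetworkReader.readFrom("checkpoint.xml")
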
Example #14
    batch_iter = 0
    while True:
        print("batch_iter: " + str(batch_iter))
        training_set_x, training_set_y = loadData(training_file, 1)
        print(len(training_set_x))
        if len(training_set_x) == 0:
            break
        
        print("there")
        for i in range(len(training_set_x)):
            dataset.addSample(training_set_x[i],training_set_y[i])
        print("here")
        trainer.train()
        print("now")
        dataset.clear()
        batch_iter += 1
    
    # Clear references to these so the garbage collector can clean them
    # once the garbage collector chooses to.
    del training_set_x
    del training_set_y
    
    correct = 0
    total = 0
    while True:
        
        print("Testing validation set")
        
        validation_set_x, validation_set_y = loadData(validation_file, 1)
        if len(validation_set_x) == 0:
class ANN:
    def __init__(self):
        self.name = "ANN"

    def getParams(self):
        return self.in_to_hidden.params, self.hidden_to_out.params

    def create_network(self, nFeatures, hidden1Size=20, nClasses=1):
        # create network object
        self.ffn = FeedForwardNetwork()

        # create layer objects
        inLayer = LinearLayer(nFeatures, name="input")
        hiddenLayer = SigmoidLayer(hidden1Size, name="hidden1")
        #hiddenLayer2 = SigmoidLayer(hidden2Size, name="hidden2")
        outLayer = LinearLayer(nClasses, name="output")

        # add layers to feed forward network
        self.ffn.addInputModule(inLayer)
        self.ffn.addModule(hiddenLayer)
        #self.ffn.addModule(hiddenLayer2)
        self.ffn.addOutputModule(outLayer)

        # add bias unit to layers
        self.ffn.addModule(BiasUnit(name='bias'))

        # establish connections between layers
        self.in_to_hidden = FullConnection(inLayer, hiddenLayer)
        #hidden_to_hidden = FullConnection(hiddenLayer, hiddenLayer2)
        self.hidden_to_out = FullConnection(hiddenLayer, outLayer)

        # print "into hidden: {}".format(len(in_to_hidden.params))
        # print "into out: {}".format(len(hidden_to_out.params))

        # add connections to network
        self.ffn.addConnection(self.in_to_hidden)
        #self.ffn.addConnection(hidden_to_hidden)
        self.ffn.addConnection(self.hidden_to_out)

        # necessary, sort layers into correct/certain order
        self.ffn.sortModules()

        # dataset object
        self.train_ds = SupervisedDataSet(nFeatures, nClasses)
        self.validate_ds = SupervisedDataSet(nFeatures, nClasses)

    # train network
    def train(self, TrainX, TrainY, ValidateX, ValidateY):
        # clear old dataset
        self.train_ds.clear()
        self.validate_ds.clear()

        # add data to dataset object (ds)
        for i in range(TrainX.shape[0]):
            self.train_ds.addSample(TrainX[i], TrainY[i])

        for i in range(ValidateX.shape[0]):
            self.validate_ds.addSample(ValidateX[i], ValidateY[i])

        # randomize weights
        self.ffn.randomize()

        # Backprop trainer object
        self.trainer = BackpropTrainer(self.ffn,
                                       learningrate=.0775,
                                       momentum=.1)
        try:
            with Timer() as t:
                self.train_errors, self.val_errors \
                    = self.trainer.trainUntilConvergence(trainingData=self.train_ds, \
                                                         validationData=self.validate_ds, \
                                                         maxEpochs=500, \
                                                         continueEpochs=10)

            #return self.train_errors, self.val_errors
        except:
            print "Error occured while training model in ANN."

        #finally:
        #    print("ANN.py - Time to trainUntilConvergence: {:.03f} sec.".format(t.interval))

        return 'ANN'

    # predict depenent variable for dataset
    def predict(self, data):
        # if only make prediction for one sample
        if (len(data.shape) == 1):
            return self.ffn.activate(data)
        else:
            outputs = np.zeros(data.shape[0])
            for i in range(data.shape[0]):
                outputs[i] = self.ffn.activate(data[i])
            return outputs
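
# Usage sketch for the ANN wrapper above on a tiny synthetic regression task
# (sizes and data are illustrative only).
import numpy as np

model = ANN()
model.create_network(nFeatures=3, hidden1Size=20, nClasses=1)

TrainX = np.random.rand(80, 3)
TrainY = TrainX.sum(axis=1, keepdims=True)        # learn a simple sum
ValidateX = np.random.rand(20, 3)
ValidateY = ValidateX.sum(axis=1, keepdims=True)

model.train(TrainX, TrainY, ValidateX, ValidateY)
print(model.predict(ValidateX[:5]))
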
class PyImpNetwork():
    def __init__(self):

        #flags for program learning states
        self.learning = 0
        self.compute = 0
        self.recurrent_flag = False
        # default case is a nonrecurrent feedforward network

        #number of mapper inputs and outputs
        self.num_inputs = 0
        self.num_outputs = 0
        self.num_hidden = 0

        #For the Mapper Signals
        self.l_inputs = {}
        self.l_outputs = {}

        #For the Artificial Neural Network
        self.data_input = {}
        self.data_output = {}

        self.learnMapperDevice = mapper.device("Implicit_LearnMapper", 9002)

    # mapper signal handler (updates self.data_input[sig_indx]=new_float_value)
    def h(self, sig, f):
        try:
            #print sig.name
            if '/in' in sig.name:
                s_indx = str.split(sig.name, "/in")
                self.data_input[int(s_indx[1])] = float(f)

            elif '/out' in sig.name:
                if (self.learning == 1):
                    print "FOUND /out and in learn mode", f
                    s_indx = str.split(sig.name, "/out")
                    self.data_output[int(s_indx[1])] = float(f)
                    print self.data_output[int(s_indx[1])]
        except:
            print "Exception, Handler not working"

    def hout(self, sig, f):
        try:
            if '/out' in sig.name:
                if (self.learning == 1):
                    print "FOUND /out and in learn mode", f
                    s_indx = str.split(sig.name, "/out")
                    self.data_output[int(s_indx[1])] = float(f)
                    print "Value saved to data_output", self.data_output[int(
                        s_indx[1])]
        except:
            print "Exception, Handler not working"

    def createANN(self, n_inputs, n_hidden, n_outputs):
        #create ANN
        self.net = buildNetwork(n_inputs,
                                n_hidden,
                                n_outputs,
                                bias=True,
                                hiddenclass=SigmoidLayer,
                                outclass=SigmoidLayer,
                                recurrent=self.recurrent_flag)

        #create ANN Dataset
        self.ds = SupervisedDataSet(n_inputs, n_outputs)

    def createMapperInputs(self, n_inputs):
        #create mapper signals (inputs)
        for l_num in range(n_inputs):
            self.l_inputs[l_num] = self.learnMapperDevice.add_input(
                "/in%d" % l_num, 1, 'f', None, 0, 1.0, self.h)
            print("creating input", "/in" + str(l_num))

        # Set initial Data Input values for Network to 0
        for s_index in range(n_inputs):
            self.data_input[s_index] = 0.0

    def createMapperOutputs(self, n_outputs):
        #create mapper signals (n_outputs)
        for l_num in range(n_outputs):
            self.l_outputs[l_num] = self.learnMapperDevice.add_output(
                "/out%d" % l_num, 1, 'f', None, 0.0, 1.0)
            self.l_outputs[l_num].set_query_callback(self.hout)
            print("creating output", "/out" + str(l_num))

        # Set initial Data Output values for Network to 0
        for s_index in range(n_outputs):
            self.data_output[s_index] = 0.0

    def setNumInputs(self, n_inputs):
        self.num_inputs = n_inputs

    def setNumeOutputs(self, n_outputs):
        self.num_outputs = n_outputs

    def setNumHiddenNodes(self, n_hidden):
        self.num_hidden = n_hidden

    def setReccurentFlag(self, flag):
        if (flag == "R"):
            self.recurrent_flag = True
        elif (flag == "F"):
            self.recurrent_flag = False

    def load_dataset(self, open_filename):
        self.ds = SupervisedDataSet.loadFromFile(open_filename)
        #print self.ds

    def save_dataset(self, filename):

        if str(filename[0]) != '':
            csv_file = open(filename[0] + ".csv", "w")
            csv_file.write("[inputs][outputs]\r\n")

        for inpt, tgt in self.ds:
            new_str = str("{" + repr(inpt) + "," + repr(tgt) + "}")
            new_str = new_str.strip('\n')
            new_str = new_str.strip('\r')
            new_str = new_str + "\r"
            csv_file.write(new_str)

        if len(new_str) > 1:
            csv_file.close()

    def save_net(self, save_filename):
        networkwriter.NetworkWriter.writeToFile(self.net, save_filename)

    def load_net(self, open_filename):
        from pybrain.tools.customxml import networkreader
        self.net = networkreader.NetworkReader.readFrom(open_filename)

    def clear_dataset(self):
        if self.ds != 0:
            self.ds.clear()

    def clear_network(self):
        #resets the module buffers but doesn't reinitialise the connection weights
        #TODO: reinitialise network here or make a new option for it.
        self.net.reset()

    def learn_callback(self):

        if self.learning == 0:
            print("learning is", self.learning)
            self.learning = 1

        elif self.learning == 1:
            print("learning is", self.learning)
            self.learning = 0

    def compute_callback(self):

        if self.compute == 1:
            self.compute = 0
            print("Compute network output is now OFF!")
        elif self.compute == 0:
            self.compute = 1
            print("Compute network output is now ON!")

    def train_callback(self):
        self.trainer = BackpropTrainer(self.net,
                                       learningrate=0.01,
                                       lrdecay=1,
                                       momentum=0.0,
                                       verbose=True)

        print 'MSE before', self.trainer.testOnData(self.ds, verbose=True)
        epoch_count = 0
        while epoch_count < 1000:
            epoch_count += 10
            self.trainer.trainUntilConvergence(dataset=self.ds, maxEpochs=10)
            networkwriter.NetworkWriter.writeToFile(self.net,
                                                    'autosave.network')

        print 'MSE after', self.trainer.testOnData(self.ds, verbose=True)
        print("\n")
        print 'Total epochs:', self.trainer.totalepochs

    def main_loop(self):
        self.learnMapperDevice.poll(1)

        if ((self.learning == 1) and (self.compute == 0)):

            # Query output values upon change in GUI
            for index in range(self.num_outputs):
                self.data_output[index] = self.l_outputs[index].query_remote()
                print self.data_output[index]

            print("Inputs: ")
            print(tuple(self.data_input.values()))
            print("Outputs: ")
            print(tuple(self.data_output.values()))

            self.ds.addSample(tuple(self.data_input.values()),
                              tuple(self.data_output.values()))

        if ((self.compute == 1) and (self.learning == 0)):
            activated_out = self.net.activate(tuple(self.data_input.values()))

            for out_index in range(self.num_outputs):
                self.data_output[out_index] = activated_out[out_index]
                self.l_outputs[out_index].update(self.data_output[out_index])
Example #17
class Image2Text:
    # The `size` parameter gives the size of each segmented character image; since
    #  the network needs equal-length input vectors, it should be large enough to
    #  contain the largest character produced by the segmentation step.
    # The `types` parameter is the number of classes the network distinguishes,
    #  i.e. the total number of distinct characters that can occur.
    def __init__(self, size=(8, 12), types=12):
        self.imgsize = size
        self.types = types
        self.ds = SupervisedDataSet(size[0] * size[1], types)
        self.net = buildNetwork(self.imgsize[0] * self.imgsize[1],
                                100,
                                types,
                                bias=True)

    def cutting(self, im):
        w, h = im.size
        data = im.getdata()
        cut_imgs = []

        vlast_sum = 0
        vbegin = 0
        vend = 0
        for i in xrange(h):
            vsum = 0
            for j in xrange(w):
                vsum += data[i * w + j]
            if vsum > 0 and vlast_sum == 0:
                vbegin = i
            if vsum == 0 and vlast_sum > 0:
                vend = i

                begin = 0
                end = 0
                last_sum = 0

                for j in xrange(w):
                    sum = 0
                    for i in xrange(vbegin, vend):
                        sum += data[i * w + j]

                    if sum > 0 and last_sum == 0:
                        begin = j
                    if sum == 0 and last_sum > 0:
                        end = j
                        cut_imgs.append(im.crop((begin, vbegin, end, vend)))
                        # print begin, vbegin, end, vend

                    last_sum = sum

            vlast_sum = vsum

        return cut_imgs

    def resize(self, im):
        img = Image.new('1', self.imgsize, 0)
        img.paste(im, (0, 0))
        return img

    def ann_addsample(self, input, output):
        myoutput = [0 for i in xrange(self.types)]
        myoutput[output] = 1
        self.ds.addSample(input, myoutput)

    def ann_clear(self):
        self.ds.clear()

    def ann_train(self):
        trainer = BackpropTrainer(self.net, self.ds,
                                  momentum=0.1,
                                  verbose=True,
                                  weightdecay=0.0001)
        trainer.trainUntilConvergence(maxEpochs=50, validationProportion=0.01)

    def ann_sim(self, input):
        output = self.net.activate(input)
        maxoutput = 0
        maxi = 0
        for i in range(len(output)):
            if maxoutput < output[i]:
                maxoutput = output[i]
                maxi = i
        return maxi

    def ann_save(self, path='ann.db'):
        fileObject = open(path, 'w')
        pickle.dump(self.net, fileObject)
        fileObject.close()

    def ann_load(self, path='ann.db'):
        try:
            with open(path, 'r') as data:
                self.net = pickle.load(data)
            return True
        except IOError as err:
            print("File Error:"+str(err)) #str()将对象转换为字符串
            return False

    def open_file(self, path):
        fp = open(path, "rb")
        im = Image.open(fp)
        return self.open(im)

    def open(self, im):

        # Binarize the image
        # im = im.convert('1')
        im = im.convert('L')
        im = im.point(lambda x: 255 if x > 196 else 0)
        im = im.convert('1')
        im = im.point(lambda i: 1 - i / 255)

        # Segment the image into characters
        imgs = self.cutting(im)

        # Pad each character image to a uniform size
        for i in range(len(imgs)):
            imgs[i] = self.resize(imgs[i])
            # imgs[i].save('a/{0}.bmp'.format(i), option={'progression': True, 'quality': 90, 'optimize': True})
        return imgs
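
# Usage sketch for Image2Text: segment a label image into characters, turn each
# character image into a flat pixel vector, and train the classifier.
# 'captcha.png' and the label indices are placeholders; a real run needs many
# labelled characters for trainUntilConvergence() to be meaningful.
i2t = Image2Text(size=(8, 12), types=12)
chars = i2t.open_file('captcha.png')               # binarize, segment, resize
labels = [0, 3, 7, 1]                              # one class index per character
for img, label in zip(chars, labels):
    i2t.ann_addsample(list(img.getdata()), label)  # 8*12 = 96 pixels per sample
i2t.ann_train()
print(i2t.ann_sim(list(chars[0].getdata())))       # predicted class of first character
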
Example #19
def random_data(dataset):
    ds=SupervisedDataSet(dataset['input'].shape[1], 2)
    ds.clear()
    for i in np.random.permutation(len(dataset)):
        ds.addSample(dataset['input'][i],dataset['target'][i])
    return ds
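
# random_data() only needs something indexable by 'input' and 'target' whose
# rows can be counted with len(); a NumPy record array like this satisfies that
# (illustrative sizes: 100 samples, 6 features, 2 targets to match the
# SupervisedDataSet(..., 2) above).
import numpy as np

dataset = np.zeros(100, dtype=[('input', float, (6,)), ('target', float, (2,))])
dataset['input'] = np.random.rand(100, 6)
dataset['target'] = np.random.rand(100, 2)

ds = random_data(dataset)
print(len(ds))    # 100 shuffled samples
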
class Slave(object):
    def __init__(self):
        self.net = FeedForwardNetwork()

    def createNetwork(self, inLayer, inLType, outLayer, outLType, hLayerNum, hiddenLayers, hLayersType, bias=True, outPutBias=True):

        del self.net
        self.net = FeedForwardNetwork()

        if bias:
            # Define the input layer
            if inLType == 0:
                self.net.addInputModule(LinearLayer(inLayer,name='in'))
            elif inLType == 1:
                self.net.addInputModule(SigmoidLayer(inLayer,name='in'))
            elif inLType == 2:
                self.net.addInputModule(TanhLayer(inLayer,name='in'))
            elif inLType == 3:
                self.net.addInputModule(SoftmaxLayer(inLayer,name='in'))
            elif inLType == 4:
                self.net.addInputModule(GaussianLayer(inLayer,name='in'))

            # Define the hidden layers
            self.hiddenLayers = []
            if hLayersType == 0:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(LinearLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])
            elif hLayersType == 1:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(SigmoidLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])
            elif hLayersType == 2:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(TanhLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])
            elif hLayersType == 3:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(SoftmaxLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])
            elif hLayersType == 4:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(GaussianLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])

            # Define the output layer
            if outLType == 0:
                self.net.addOutputModule(LinearLayer(outLayer,name='out'))
            elif outLType == 1:
                self.net.addOutputModule(SigmoidLayer(outLayer,name='out'))
            elif outLType == 2:
                self.net.addOutputModule(TanhLayer(outLayer,name='out'))
            elif outLType == 3:
                self.net.addOutputModule(SoftmaxLayer(outLayer,name='out'))
            elif outLType == 4:
                self.net.addOutputModule(GaussianLayer(outLayer,name='out'))

            # Create the bias unit
            self.net.addModule(BiasUnit(name='networkBias'))

            # Connect the layers
            if self.hiddenLayers:
                self.net.addConnection(FullConnection(self.net['in'], self.hiddenLayers[0]))
                for h1, h2 in zip(self.hiddenLayers[:-1], self.hiddenLayers[1:]):
                    self.net.addConnection(FullConnection(self.net['networkBias'],h1))
                    self.net.addConnection(FullConnection(h1,h2))
                if outPutBias:
                    self.net.addConnection(FullConnection(self.net['networkBias'],self.net['out']))
                self.net.addConnection(FullConnection(self.hiddenLayers[-1],self.net['out']))
            else:
                if outPutBias:
                    self.net.addConnection(FullConnection(self.net['networkBias'],self.net['out']))
                self.net.addConnection(FullConnection(self.net['in'],self.net['out']))
        else:
            # Define the input layer
            if inLType == 0:
                self.net.addInputModule(LinearLayer(inLayer,name='in'))
            elif inLType == 1:
                self.net.addInputModule(SigmoidLayer(inLayer,name='in'))
            elif inLType == 2:
                self.net.addInputModule(TanhLayer(inLayer,name='in'))
            elif inLType == 3:
                self.net.addInputModule(SoftmaxLayer(inLayer,name='in'))
            elif inLType == 4:
                self.net.addInputModule(GaussianLayer(inLayer,name='in'))

            # Define the hidden layers
            self.hiddenLayers = []
            if hLayersType == 0:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(LinearLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])
            elif hLayersType == 1:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(SigmoidLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])
            elif hLayersType == 2:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(TanhLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])
            elif hLayersType == 3:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(SoftmaxLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])
            elif hLayersType == 4:
                for i in range(0, hLayerNum):
                    self.hiddenLayers.append(GaussianLayer(hiddenLayers[i]))
                    self.net.addModule(self.hiddenLayers[i])

            # Define the output layer
            if outLType == 0:
                self.net.addOutputModule(LinearLayer(outLayer,name='out'))
            elif outLType == 1:
                self.net.addOutputModule(SigmoidLayer(outLayer,name='out'))
            elif outLType == 2:
                self.net.addOutputModule(TanhLayer(outLayer,name='out'))
            elif outLType == 3:
                self.net.addOutputModule(SoftmaxLayer(outLayer,name='out'))
            elif outLType == 4:
                self.net.addOutputModule(GaussianLayer(outLayer,name='out'))

            if self.hiddenLayers:
                self.net.addConnection(FullConnection(self.net['in'], self.hiddenLayers[0]))
                for h1, h2 in zip(self.hiddenLayers[:-1], self.hiddenLayers[1:]):
                    self.net.addConnection(FullConnection(h1,h2))
                self.net.addConnection(FullConnection(self.hiddenLayers[-1],self.net['out']))
            else:
                self.net.addConnection(FullConnection(self.net['in'],self.net['out']))

        # Finish building the network and sort the modules into a consistent order
        self.net.sortModules()

    def setParameters(self, parameters):
        self.net._setParameters(parameters)

    def getParameters(self):
        return self.net.params.tolist()

    def createDataSet(self, ds):
        inp = ds.indim
        targ = ds.outdim

        self.ds = SupervisedDataSet(inp, targ)

        for i,t in ds:
            self.ds.addSample(i,t)

    def updateDataSet(self, ds):
        self.ds.clear(True)
        for i,t in ds:
            self.ds.addSample(i,t)
        self.trainer.setData(self.ds)

    def createTrainer(self, learnrate=0.01, ldecay=1.0, momentum=0.0, batchlearn=False, wdecay=0.0):
        self.trainer = BackpropTrainer(self.net, self.ds, learningrate=learnrate, lrdecay=ldecay, momentum=momentum, batchlearning=batchlearn, weightdecay=wdecay)

    def trainNetwork(self):
        self.trainer.train()

    def loadNetwork(self, net):
        del self.net
        self.net = net
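
# Usage sketch for Slave: build a small sigmoid network from the layer-type
# codes used above (0 = linear, 1 = sigmoid), copy an existing dataset into it,
# and run a few backprop steps. XOR data is used purely for illustration.
from pybrain.datasets import SupervisedDataSet

xor = SupervisedDataSet(2, 1)
for inp, tgt in [((0, 0), (0,)), ((0, 1), (1,)), ((1, 0), (1,)), ((1, 1), (0,))]:
    xor.addSample(inp, tgt)

slave = Slave()
# 2 linear inputs, one hidden layer of 4 sigmoid units, 1 sigmoid output, with bias
slave.createNetwork(2, 0, 1, 1, 1, [4], 1, bias=True)
slave.createDataSet(xor)
slave.createTrainer(learnrate=0.1, momentum=0.9)
for _ in range(100):
    slave.trainNetwork()
print(slave.getParameters()[:5])
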
Example #22
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]

# We will vary the training set so that we have 10 different sizes
sizes = linspace(10, len(X_train), 10)
train_err = zeros(len(sizes))
test_err = zeros(len(sizes))

# Build a network with 3 hidden layers
net = buildNetwork(13, 9, 7, 5, 1)
# The dataset will have 13 input features and 1 output
ds = SupervisedDataSet(13, 1)

for i,s in enumerate(sizes):
    # Populate the dataset for training
    ds.clear()
    for j in range(1, int(s)):
        ds.addSample(X_train[j], y_train[j])

    # Setup a backprop trainer
    trainer = BackpropTrainer(net, ds)

    # Train the NN for 50 epochs
    # The .train() function returns MSE over the training set
    for e in range(0, 50):
        train_err[i] = trainer.train()

    # Find labels for the test set
    y = zeros(len(X_test))
    for j in range(0, len(X_test)):
        y[j] = net.activate(X_test[j])
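    # The loop above fills train_err but never test_err. A natural completion
    # (an assumption, consistent with trainer.train() returning the training MSE)
    # is to record the test-set mean squared error for each training-set size:
    test_err[i] = ((y - y_test) ** 2).mean()

# After the loop the two learning curves can be compared, e.g.
# plt.plot(sizes, train_err) and plt.plot(sizes, test_err).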
Example #23
class motko:
    @timing_function
    def pybrain_init(self, input_amount=7, output_amount=8, hidden_layers=6):
        # TODO Randomize hidden layer classes, ongoing...
        # because threading
        random.jumpahead(1252157)
        self.hiddenLayerAmount = random.randint(1, hidden_layers * 2)
        self.hiddenLayerNeuronsAmount = []
        # layerlist = [LinearLayer,SigmoidLayer,TanhLayer, GaussianLayer, SoftmaxLayer]  # for future use
        self.ds = SupervisedDataSet(input_amount, output_amount)
        self.nn = FeedForwardNetwork()
        self.inLayer = LinearLayer(input_amount, "in")
        # self.bias = BiasUnit(name="bias")
        if (random.randint(0, 100) >= 50):
            self.outLayer = LinearLayer(
                output_amount, "out")  # could be lineare layer or softmax???
        else:
            self.outLayer = SoftmaxLayer(
                output_amount, "out")  # could be lineare layer or softmax???
        self.hiddenlayers = []
        self.connections = []
        self.nn.addInputModule(self.inLayer)
        self.nn.addOutputModule(self.outLayer)
        # self.nn.addModule(self.bias)
        # self.nn.addConnection(FullConnection(self.inLayer, self.bias))

        # the layer count could also be randomized, e.g. random.randint(hidden_layers, hidden_layers * 10)
        for i in range(self.hiddenLayerAmount):
            # random neuron count per hidden layer; each layer is randomly Tanh or Sigmoid
            self.hiddenLayerNeuronsAmount.append(
                random.randint(1, hidden_layers * 2))
            if (random.randint(0, 100) >= 50):
                self.hiddenlayers.append(
                    TanhLayer(self.hiddenLayerNeuronsAmount[i],
                              "hidden{}".format(i)))
            else:
                self.hiddenlayers.append(
                    SigmoidLayer(self.hiddenLayerNeuronsAmount[i],
                                 "hidden{}".format(i)))

            if (i == 0):
                # the first hidden layer connects to the input layer
                self.connections.append(
                    FullConnection(self.inLayer,
                                   self.hiddenlayers[0],
                                   name="in_to_hid"))
            else:
                self.connections.append(
                    FullConnection(self.hiddenlayers[i - 1],
                                   self.hiddenlayers[i],
                                   name="hid{}_to_hid{}".format(i - 1, i)))
            self.nn.addModule(self.hiddenlayers[i])

        self.connections.append(
            FullConnection(self.hiddenlayers[len(self.hiddenlayers) - 1],
                           self.outLayer,
                           name="hid_to_out"))

        for i in range(len(self.connections)):
            self.nn.addConnection(self.connections[i])

        self.nn.sortModules()
        # self.printlog(self.getliveinfo2())
        # self.printlog("hiddenLayerAmount:{}".format(self.hiddenLayerAmount))

    @timing_function
    def CreateTrainingset(self, color, smallerTS=False):
        if (smallerTS):
            self.printlog("starting to create trainignset")
            sys.stdout.flush()
            # inputs are: energy 0, food avail 1, food left 2, food right 3, food color 4, color 5, meeting motko color 6,
            e = -0.30
            fa = -0.20
            fl = -0.20
            fr = -0.20
            for _ in range(5):
                e = e + 0.25
                fa = -0.20
                fl = -0.20
                fr = -0.20
                for _ in range(5):
                    fa = fa + 0.25
                    fl = -0.20
                    fr = -0.20
                    for _ in range(5):
                        fl = fl + 0.25
                        fr = -0.20
                        for _ in range(6):
                            fr = fr + 0.25
                            for fc in range(5):
                                for mtc in range(5):
                                    # self.printlog("self.ds.addSample([%s], [%s]" % (" ".join(str(x) for x in self.roundfloat([e, fa, fl, fr, fc, c, mtc])), " ".join(str(x) for x in self.roundfloat(self.gettraining2([e, fa, fl, fr, fc, c, mtc], self.nn.activate([e, fa, fl, fr, fc, c, mtc]))))))
                                    self.ds.addSample(
                                        [e, fa, fl, fr, fc, color, mtc],
                                        self.gettraining2(
                                            [e, fa, fl, fr, fc, color, mtc],
                                            self.nn.activate([
                                                e, fa, fl, fr, fc, color, mtc
                                            ])))
            self.saveDS("Basic_Test_TrainingSet_{0}.ds".format(color))
            self.printlog("Create trainignset done")
            self.printlog("starting to create trainignset")
            sys.stdout.flush()
        else:
            for e in range(1, 11):
                for fa in range(1, 11, 2):
                    for fl in range(1, 11, 2):
                        for fr in range(1, 11, 2):
                            for fc in range(5):
                                for mtc in range(5):
                                    # self.printlog("self.ds.addSample([%s], [%s]" % (" ".join(str(x) for x in self.roundfloat([e*0.1, fa*0.1, fl*0.1, fr*0.1, fc, c, mtc])), " ".join(str(x) for x in self.roundfloat(self.gettraining2([e*0.1, fa*0.1, fl*0.1, fr*0.1, fc, c, mtc], self.nn.activate([e, fa, fl, fr, fc, c, mtc])))))
                                    self.ds.addSample(
                                        [
                                            e * 0.1, fa * 0.1, fl * 0.1,
                                            fr * 0.1, fc, color, mtc
                                        ],
                                        self.gettraining2([
                                            e * 0.1, fa * 0.1, fl * 0.1,
                                            fr * 0.1, fc, color, mtc
                                        ],
                                                          self.nn.activate([
                                                              e, fa, fl, fr,
                                                              fc, color, mtc
                                                          ])))
            self.saveDS("Basic_TrainingSet_{0}.ds".format(color))
            self.printlog("Create trainignset done")
            sys.stdout.flush()

    @timing_function
    def trainerTrainUntilConvergence(self):
        for i in range(1):
            self.printlog("before", self.trainer.train())
            sys.stdout.flush()
            self.trainer.trainEpochs(1)
            # self.trainer.trainUntilConvergence(validationProportion=0.2)
        self.currenterror = self.trainer.train()
        self.printlog("after", self.trainer.train())
        sys.stdout.flush()

    @timing_function
    def trainloopamount(self, Trainingloops=1, printvalues=True):
        for i in range(Trainingloops - 1):
            # self.trainer.train()
            self.trainer.train()

        self.printlog(self.trainer.train())
        self.currenterror = self.trainer.train()
        sys.stdout.flush()

    @timing_function
    def trainfromfileds(self, fileds, loops=10, trainUntilConvergence=False):
        # self.printlog("Loading training set {} samples long".format(len(fileds)))
        sys.stdout.flush()
        filedstrainer = BackpropTrainer(
            self.nn, fileds, learningrate=0.6,
            momentum=0.4)  # fairly large learning rate; may need tuning
        # self.printlog("Loading training set done")
        sys.stdout.flush()
        if (trainUntilConvergence):
            # self.printlog("Starting trainUntilConvergence {} loops".format(loops))
            for i in range(1, loops + 1):
                self.currenterror = filedstrainer.train()
                self.printlog("Loop {}, before error:{}".format(
                    i, self.currenterror))
                sys.stdout.flush()
                filedstrainer.trainEpochs(1)
                # filedstrainer.trainUntilConvergence(validationProportion=0.2)
                self.currenterror = filedstrainer.train()
                self.printlog("Loop {}, after error:{}".format(
                    i, self.currenterror))
                sys.stdout.flush()
        else:
            # self.printlog("Starting training {} loops".format(loops))
            for i in range(1, loops + 1):
                self.currenterror = filedstrainer.train()
                self.printlog("Loop {}, error:{}".format(i, self.currenterror))
                sys.stdout.flush()

    @timing_function
    def saveDS(self, DSFilename):
        self.ds.saveToFile(DSFilename)
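        # Round-trip note (hedged): a dataset written by saveDS() can later be
        # reloaded with SupervisedDataSet.loadFromFile(<filename>) and handed
        # straight to trainfromfileds().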

    @timing_function
    def responce(self, liveinput):
        self.trainingresult = self.gettraining2(liveinput,
                                                self.nn.activate(liveinput))
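        # Retraining cycle: after the first `trytolivesteps` live steps every step
        # is recorded into the dataset; once `aftermovestrain` extra samples have
        # been collected a fresh BackpropTrainer is built, the net is retrained and
        # the counters reset.  The dataset is wiped once it reaches 500 samples.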
        if (self.trainsteps > self.trytolivesteps):
            self.printlog("{0}: {1}: {2}".format(self.trainsteps,
                                                 self.trytolivesteps,
                                                 self.aftermovestrain))
            self.ds.addSample(liveinput, self.trainingresult)
            if (self.trainsteps - self.trytolivesteps == self.aftermovestrain):
                self.trainer = BackpropTrainer(
                    self.nn, self.ds, learningrate=0.6,
                    momentum=0.1)  # fairly large learning rate; may need tuning
                # self.trainer.trainEpochs(1)
                # self.currenterror = self.trainer.train()
                # self.printlog("trainUntilConvergence1: %s" % (self.currenterror))
                if (self.test):
                    self.currenterror = self.trainer.train()
                else:
                    for _ in range(100):
                        self.trainer.trainUntilConvergence()
                    self.currenterror = self.trainer.train()
                    self.printlog(self.getliveinfo())
                # self.printlog("curren error {}".format(self.currenterror))
                # self.printlog("%s: %s: %s" % (" ".join(str(x) for x in self.roundfloat(liveinput)), " ".join(str(x) for x in self.roundfloat(self.trainingresult)), " ".join(str(x) for x in self.roundfloat(self.nn.activate(liveinput)))))
                self.trainsteps = 0
                self.trainings += 1

        if (len(self.ds) == 500):
            self.ds.clear()
        return self.nn.activate(liveinput)

    @timing_function
    def collision(self, collider, collidersize):
        if (int(self.X) < int(collider[0]) + int(collidersize[0])
                and int(self.X) + int(self.size[0]) > int(collider[0])
                and int(self.Y) < int(collider[1]) + int(collidersize[1])
                and int(self.size[1]) + int(self.Y) > int(collider[1])):
            # print (collider, self.X, self.Y)
            return 1  # collision
        else:
            return 0

    @timing_function
    def __init__(self, filename, eartsize, num_hiddenLayers, test=False):
        logging.basicConfig(filename="motkot.log",
                            format='%(asctime)s:%(levelname)s:%(message)s',
                            level=logging.INFO)
        logging.info("motkot start")
        self.cwd = os.getcwd()
        self.test = test
        self.filename = filename
        self.pybrain_init(hidden_layers=num_hiddenLayers)

        self.eartsize = eartsize
        self.X = random.randint(0, eartsize[0])
        self.Y = random.randint(0, eartsize[1])
        self.consumption = 0.001
        self.RED = (255, 0, 0)
        self.BLACK = (0, 0, 0)
        self.GREEN = (0, 255, 0)
        self.BLUE = (0, 0, 255)
        colors = []
        colors.append(self.RED)
        colors.append(self.BLACK)
        colors.append(self.GREEN)
        colors.append(self.BLUE)

        # inputs are: energy 0, food avail 1, food left 2, food right 3, food color 4, color 5, meeting motko color 6,
        self.energy = 0.9
        self.foodavail = 0
        self.foodInLeft = 0
        self.foodInRight = 0
        self.foodcolor = 4  # no food
        self.colornumber = random.randrange(3)
        self.meetinmotkocolor = 4  # no motko

        # outputs are: eat 0, eat amount 1, move 2, turn left 3, turn right 4, kill 5, flee 6, sex 7
        self.eat = 0
        self.eatamount = 0
        self.move = 0
        self.turnleft = 0
        self.turnright = 0
        self.kill = 0
        self.flee = 0
        self.sex = 0

        self.color = colors[self.colornumber]
        self.shadow = []
        self.shadowlength = 100
        self.startime = datetime.datetime.fromtimestamp(
            time.mktime(time.gmtime()))
        self.movecount = 0
        self.movememory = []
        self.trainsteps = 0
        self.aftermovestrain = 100
        self.trytolivesteps = 2000
        self.randomcount = random.randint(5, 50)

        self.size = (5 + int(self.energy * 6))
        self.direction = 0
        self.directionvector = []
        self.directionvector.append([1, 0])
        self.directionvector.append([1, 1])
        self.directionvector.append([0, 1])
        self.directionvector.append([-1, 1])
        self.directionvector.append([-1, 0])
        self.directionvector.append([-1, -1])
        self.directionvector.append([0, -1])
        self.directionvector.append([1, -1])

        self.eyeleftplace = []
        self.eyeleftplace.append(0)
        self.eyeleftplace.append(1)

        self.eyerightplace = []
        self.eyerightplace.append(0)
        self.eyerightplace.append(1)
        self.eyesightsizeleft = [self.size, (self.size + 10)]
        self.eyesightsizeright = [self.size, (self.size + 10)]
        self.seteyes()
        self.randmovevector()
        self.trainings = 0
        self.currenterror = 10
        self.trainingresult = []
        self.doodReason = ""

    @timing_function
    def saveLog(self, filename, strinki, fileaut):
        if (os.path.isdir(os.path.join(os.getcwd(), 'logs')) is not True):
            os.makedirs(os.path.join(os.getcwd(), 'logs'))
        target = open(os.path.join(os.getcwd(), 'logs', filename), fileaut)
        if (not isinstance(strinki, str)):
            for item in strinki:
                target.write("%s\n" % item)
        else:
            target.write(strinki)
        target.close()

    @timing_function
    def seteyes(self):
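        # Position the left/right eye rectangles and their sight areas relative to
        # the body, one branch for each of the eight movement directions.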
        if (self.directionvector[self.direction][0] >= 1
                and self.directionvector[self.direction][1] == 0):
            self.eyeleftplace[0] = self.X + self.size
            self.eyeleftplace[1] = self.Y - (self.size + 15)
            self.eyerightplace[0] = self.X + self.size
            self.eyerightplace[1] = self.Y + self.size + self.size
            self.eyesightsizeleft = [self.size, 15]
            self.eyesightsizeright = [self.size, 15]
        elif (self.directionvector[self.direction][0] >= 1
              and self.directionvector[self.direction][1] >= 1):
            self.eyeleftplace[0] = self.X + self.size + self.size
            self.eyeleftplace[1] = self.Y
            self.eyerightplace[0] = self.X
            self.eyerightplace[1] = self.Y + (self.size + self.size)
            self.eyesightsizeleft = [15, self.size]
            self.eyesightsizeright = [self.size, 15]
        elif (self.directionvector[self.direction][0] == 0
              and self.directionvector[self.direction][1] >= 1):
            self.eyeleftplace[0] = self.X + (self.size + self.size)
            self.eyeleftplace[1] = self.Y + self.size
            self.eyerightplace[0] = self.X - (self.size + 15)
            self.eyerightplace[1] = self.Y + self.size
            self.eyesightsizeleft = [15, self.size]
            self.eyesightsizeright = [15, self.size]
        elif (self.directionvector[self.direction][0] <= -1
              and self.directionvector[self.direction][1] >= 1):
            self.eyeleftplace[0] = self.X - (self.size + 15)
            self.eyeleftplace[1] = self.Y
            self.eyerightplace[0] = self.X
            self.eyerightplace[1] = self.Y + (self.size + self.size)
            self.eyesightsizeleft = [15, self.size]
            self.eyesightsizeright = [self.size, 15]
        elif (self.directionvector[self.direction][0] <= -1
              and self.directionvector[self.direction][1] == 0):
            self.eyeleftplace[0] = self.X - self.size
            self.eyeleftplace[1] = self.Y - (self.size + 15)
            self.eyerightplace[0] = self.X - self.size
            self.eyerightplace[1] = self.Y + (self.size + self.size)
            self.eyesightsizeleft = [self.size, 15]
            self.eyesightsizeright = [self.size, 15]
        elif (self.directionvector[self.direction][0] <= -1
              and self.directionvector[self.direction][1] <= -1):
            self.eyeleftplace[0] = self.X
            self.eyeleftplace[1] = self.Y - (self.size + 15)
            self.eyerightplace[0] = self.X - (self.size + 15)
            self.eyerightplace[1] = self.Y
            self.eyesightsizeleft = [self.size, 15]
            self.eyesightsizeright = [15, self.size]
        elif (self.directionvector[self.direction][0] == 0
              and self.directionvector[self.direction][1] <= -1):
            self.eyeleftplace[0] = self.X - (self.size + 15)
            self.eyeleftplace[1] = self.Y - self.size
            self.eyerightplace[0] = self.X + (self.size + self.size)
            self.eyerightplace[1] = self.Y - self.size
            self.eyesightsizeleft = [15, self.size]
            self.eyesightsizeright = [15, self.size]
        elif (self.directionvector[self.direction][0] >= 1
              and self.directionvector[self.direction][1] <= -1):
            self.eyeleftplace[0] = self.X
            self.eyeleftplace[1] = self.Y - (self.size + 15)
            self.eyerightplace[0] = self.X + (self.size + self.size)
            self.eyerightplace[1] = self.Y
            self.eyesightsizeleft = [self.size, 15]
            self.eyesightsizeright = [15, self.size]

    @timing_function
    def reinit(self):
        self.printlog("{} reinit".format(
            datetime.datetime.fromtimestamp(time.mktime(time.gmtime()))))
        self.X = random.randint(0, self.eartsize[0])
        self.Y = random.randint(0, self.eartsize[1])
        self.randmovevector()
        self.seteyes()
        self.energy = 1
        self.shadow[:] = []
        self.movecount = 0
        self.startime = datetime.datetime.now()

    @timing_function
    def live(self, dontPrintInfo=False, test=False):

        self.test = test
        self.eatamount = 0
        # inputs are: energy 0, food avail 1, food left 2, food right 3, food color 4, color 5, meeting motko color 6
        # print ([self.energy, self.foodavail, self.foodInLeft, self.foodInRight, self.foodcolor, self.colornumber, self.meetinmotkocolor])
        # printlog(self.energy)
        neuraloutputs = self.responce([
            self.energy, self.foodavail, self.foodInLeft, self.foodInRight,
            self.foodcolor, self.colornumber, self.meetinmotkocolor
        ])
        logging.info("{}: {}".format([
            self.energy, self.foodavail, self.foodInLeft, self.foodInRight,
            self.foodcolor, self.colornumber, self.meetinmotkocolor
        ], neuraloutputs))
        if (dontPrintInfo):  # TODO: change so that the output names are shown
            self.printlog(
                "\n%s\n%s: %f: %f: %d\n%s: %f: %d" %
                ("eat\t\teata\tmove\ttleft\ttright\tkill\tflee\tsex",
                 "\t".join(
                     str(x) for x in self.roundfloat(self.trainingresult)),
                 self.foodavail, self.energy, self.colornumber, "\t".join(
                     str(x) for x in self.roundfloat(neuraloutputs)),
                 self.currenterror, len(self.ds)))
            # self.printlog("\n%s: \n%s: %f: %d" % (" ".join(str(x) for x in self.roundfloat([self.energy, self.foodavail, self.foodInLeft, self.foodInRight, self.foodcolor, self.colornumber, self.meetinmotkocolor])), " ".join(str(x) for x in self.roundfloat(neuraloutputs)), self.currenterror, len(self.ds)))

        self.eat = neuraloutputs[0]
        self.eatamount = neuraloutputs[1]
        self.move = neuraloutputs[2]
        self.turnleft = neuraloutputs[3]
        self.turnright = neuraloutputs[4]
        self.kill = neuraloutputs[5]
        self.flee = neuraloutputs[6]
        self.sex = neuraloutputs[7]
        # outputs are: eat 0, eat amount 1, move 2, turn left 3, turn right 4, kill 5, flee 6, sex 7
        # eating
        if (self.eat > 0):
            if (self.foodavail > 0):
                self.energy = self.energy + self.eatamount
            # if(self.foodcolor == self.colornumber):  # eatinmg wrong food
            # printlog("dood by eating wrong color {} vs {}".format(self.foodcolor, self.colornumber))
            # self.doodReason = "exception dood"
            # return 1

        self.energy = self.energy - self.consumption

        if (self.directionvector[self.direction][0] == 0
                and self.directionvector[self.direction][1] == 0):
            self.randmovevector()

        self.speed = self.move * 10

        self.X += self.directionvector[self.direction][0] * int(self.speed)
        self.Y += self.directionvector[self.direction][1] * int(self.speed)

        self.shadow.append([self.X, self.Y])
        if len(self.shadow) >= self.shadowlength:
            del self.shadow[0]

        if (self.turnleft > 0.1 or self.turnright > 0.1
            ):  # otherwise we would turn all the time; adjust the threshold if necessary
            if (self.turnleft > self.turnright):  # move left
                if (self.direction % 2 == 0):
                    if (self.direction == 0):
                        self.direction = 6
                    else:
                        self.direction -= 2
                elif (self.direction == 0):
                    self.direction = 7
                else:
                    self.direction -= 1
            else:  # move right
                if (self.direction % 2 == 0):
                    if (self.direction == 6):
                        self.direction = 0
                    else:
                        self.direction += 2
                elif (self.direction == 7):
                    self.direction = 0
                else:
                    self.direction += 1

        self.seteyes()

        # motko size
        if (self.energy < 0):
            self.size = int(0.001 * 6)
        elif (self.energy > 1.3):
            self.size = int(1.3 * 6)
        else:
            self.size = int(self.energy * 6)

        if (self.X >= self.eartsize[0]):
            self.X = 0
            self.randmovevector()
            self.seteyes()
        elif (self.X <= 0):
            self.X = self.eartsize[0]
            self.randmovevector()
            self.seteyes()
        if (self.Y >= self.eartsize[1]):
            self.Y = 0
            self.randmovevector()
            self.seteyes()
        if (self.Y <= 0):
            self.Y = self.eartsize[1]
            self.randmovevector()
            self.seteyes()

        self.foodavail = 0
        self.foodInLeft = 0
        self.foodInRight = 0
        self.meetinmotkocolor = 4
        self.foodcolor = 4
        self.movecount += 1
        self.trainsteps += 1

    # print (self.roundfloat(trainingoutputs), self.roundfloat(neuraloutputs), self.roundfloat([self.energy, self.foodavail]))

    @timing_function
    def didoueat(self):
        # print ("didoueat", self.eatamount)
        return self.eatamount

    @timing_function
    def addfoodavail(self, addfood, foodcolor):
        # self.printlog("addfoodavail",self.foodavail, self.foodcolor)
        self.foodavail = addfood
        self.foodcolor = foodcolor

    @timing_function
    def foodleft(self, foodInLeft):
        # self.printlog(self.foodInLeft, foodInLeft)
        self.foodInLeft = foodInLeft

    @timing_function
    def foodright(self, foodInRight):
        self.foodInRight = foodInRight

    @timing_function
    def randmovevector(self):
        vectorok = 1
        temp = random.randint(0, 7)
        while (vectorok):
            if (temp == self.direction):
                temp = random.randint(0, 7)
            else:
                self.direction = temp
                vectorok = 0

    @timing_function
    def roundfloat(self, rounuppilist):
        roundedlist = []
        for i in range(len(rounuppilist)):
            roundedlist.append('{:.3f}'.format(rounuppilist[i]))
        return roundedlist

    @timing_function
    def getliveinfo(self):
        # time2 = datetime.datetime.fromtimestamp(time.mktime(time.gmtime()))
        # diff = time2 - self.startime
        return [
            round(self.energy, 4),
            self.filename.split('.')[0], self.movecount, self.trainings,
            self.currenterror
        ]
        # return [round(self.energy, 4), round(self.speed, 4), self.filename, diff.total_seconds(), self.movecount]

    @timing_function
    def getliveinfo2(self):
        returndata = "{}\n".format(self.filename.split('.')[0])
        returndata += "inLayer:{}\n".format(self.nn["in"])  # self.inLayer
        for i in range(len(self.hiddenlayers)):
            returndata += "\thidden{}:{}, neurons {}\n".format(
                i, self.hiddenlayers[i], self.hiddenLayerNeuronsAmount[i])
        returndata += "outLayer:{}\n".format(self.nn["out"])  # self.outLayer
        return returndata

    @timing_function
    def getliveinfo3(self):
        returndata = "{}\n".format(self.filename.split('.')[0])
        returndata += "inLayer:{}\n{}\n".format(
            self.nn["in"], self.connections[0].params)  # self.inLayer
        for i in range(len(self.hiddenlayers)):
            returndata += "\thidden{}:{}, neurons {}\n{}\n".format(
                i, self.hiddenlayers[i], self.hiddenLayerNeuronsAmount[i],
                self.connections[i + 1].params)

        returndata += "outLayer:{}\n{}\n".format(
            self.nn["out"], self.connections[len(self.connections) -
                                             1].params)  # self.outLayer
        returndata += "error:{}\n".format(self.currenterror)
        return returndata

    @timing_function
    def areyouallive(self):
        time2 = datetime.datetime.now()
        diff = time2 - self.startime
        if (self.test):
            if (self.energy < -5.00 or self.energy > 5.00):
                return (["dood", self.energy, self.move, self.trainings])
            if (diff.total_seconds() > 120):
                # self.saveLog(self.filename, self.nn.inspectTofile(), 'a+')
                return (["viable NN"])
            if (self.doodReason == "exception dood"):
                return (["dood"])
            return ["ok"]
        else:
            if (self.energy < -5.00 or self.energy > 5.00):
                self.energy = 2
                # return ["dood"]
            elif (self.trainings == 1000):
                return ["dood"]
            if (self.doodReason == "exception dood"):
                return (["dood"])
            return ["ok"]

    @timing_function
    def getname(self):
        return self.filename

    @timing_function
    def setname(self, name):
        self.filename = name

    @timing_function
    def getinputaproximation(self, value):
        if (value <= 0.0):
            return 0
        elif (0.0 < value <= 0.25):
            return 1
        elif (0.25 < value <= 0.50):
            return 2
        elif (0.50 < value <= 0.75):
            return 3
        elif (0.75 < value <= 1):
            return 4
        elif (value > 1):
            return 5

    @timing_function
    def leftorright(self, left, right):
        # check the equal "no signal" case first, otherwise that branch is unreachable
        if (left == 0.01 and right == 0.01):
            return 2
        elif (left > right):
            return 0
        else:
            return 1

    @timing_function
    def printlog(self, message):
        print("%s %s:%s" %
              (str(datetime.datetime.now()), self.filename, message))

    @timing_function
    def eatcalc(self, inputs, neuronoutputs=None):
        # EAT energy 0, food avail 1, food left 2, food right 3, food color 4, color 5,
        outputs = [99, 99, 99, 99, 99]  # default response
        if (inputs[0] < 0.75):  # hungry
            if (
                    inputs[1] < inputs[2]
                    or (inputs[4] == inputs[5] and inputs[4] != 4)
            ):  # more food to the left than ahead, or the food color differs from ours (i.e. edible)
                # self.printlog("food in left more than front or food color is different than in your color meaning eatable")
                outputs[0] = 0  # dont eat
                outputs[1] = 0  # dont eat anything
                outputs[2] = 0.25  # move
                outputs[3] = 1  # turn left
                outputs[4] = 0  # do not turn right
            elif (inputs[1] < inputs[3]
                  or (inputs[4] == inputs[5]
                      and inputs[4] != 4)):  # food in right more than front
                # self.printlog(" # food in right more than front")
                outputs[0] = 0  # dont eat
                outputs[1] = 0  # dont eat anything
                outputs[2] = 0.25  # move
                outputs[3] = 0  # do not turn left
                outputs[4] = 1  # turn right
            elif (inputs[4] == inputs[5] and inputs[1] !=
                  0):  # food is our own color: don't eat it, it would kill us
                # self.printlog("# food is same color dont eat it will kill you")
                outputs[0] = 0  # dont eat
                outputs[1] = 0  # dont eat anything
                outputs[2] = 0.25  # move
                outputs[3] = 1  # turn left (we prefer turning left)
                outputs[4] = 0  # do not turn right
            elif (inputs[4] != inputs[5] and inputs[1] !=
                  0):  # food is different color than you, it is eatable
                # self.printlog("# food is different color than you, it is eatable")
                if ((inputs[0] + inputs[1]) < 1.5):  # the food is not too much
                    # self.printlog("# food is not too mutch")
                    outputs[0] = 1  # eat
                    outputs[1] = inputs[1]  # eat all
                    outputs[2] = 0.25  # move
                    outputs[3] = 0  # do not turn left
                    outputs[4] = 0  # do not turn right
                    return outputs
                else:
                    # self.printlog("else # eat")
                    outputs[0] = 1  # eat
                    if (inputs[0] >= 0):
                        outputs[1] = (inputs[0] +
                                      inputs[1]) - 1  # eat a little less
                    else:
                        outputs[1] = inputs[1]  # eat all
                    outputs[2] = 0.25  # move
                    outputs[3] = 0  # do not turn left
                    outputs[4] = 0  # do not turn right
                    return outputs
        # the hungrier we are, the faster we move
        if (0.75 < inputs[0] <= 1):
            outputs[2] = 0.25
        if (0.5 < inputs[0] <= 0.75):
            outputs[2] = 0.50
        if (0.25 < inputs[0] <= 0.50):
            outputs[2] = 0.75
        if (0.0 < inputs[0] <= 0.25):
            outputs[2] = 0.75
        if (0.0 > inputs[0]):
            outputs[2] = 1.25

        if (inputs[0] >= 1):  # full do not eat
            outputs[0] = 0  # dont eat
            outputs[1] = 0  # dont eat anything
        if (outputs[3] != 1 and outputs[4] != 1):  # dont turn
            outputs[3] = 0
            outputs[4] = 0
        if (inputs[4] == inputs[5] or inputs[4] == 4):  # definitely don't eat
            outputs[0] = 0  # dont eat
            outputs[1] = 0  # dont eat anything

        for i in range(len(outputs)):
            if (outputs[i] == 99 and neuronoutputs
                    is not None):  # slots the heuristic left undecided fall back to the ANN's own output
                outputs[i] = neuronoutputs[i]
        return outputs

    @timing_function
    def contactcalc(self, inputs, neuronoutputs=None):
        # if the other has the same color as us: mate or flee; if a different color: flee or kill.
        # By killing you gain the energy the other one had.
        outputs = [99, 99, 99]  # default response
        if (inputs[5] == inputs[6]
            ):  # same color: flee or mate, the energy level decides which
            if (inputs[0] > 0.50):  # enough energy to mate
                outputs[2] = inputs[0]
            else:
                outputs[1] = 1 - inputs[0]  # the less energy, the stronger the urge to flee
        else:
            if (inputs[0] > 0.5 and inputs[6] != 4):  # enough energy to flee
                outputs[1] = inputs[0]
            elif (inputs[6] != 4):  # hungry, so fight
                outputs[0] = 1 - inputs[0]

        for i in range(len(outputs)):
            if (outputs[i] == 99 and neuronoutputs is not None):
                outputs[i] = neuronoutputs[i + 4]

        return outputs

    @timing_function
    def gettraining2(self, inputs, neuronoutputs=None):
        # inputs are: energy 0, food avail 1, food left 2, food right 3, food color 4, color 5, meeting motko color 6
        # outputs are: eat 0, eat amount 1, move 2, turn left 3, turn right 4, kill 5, flee 6, sex 7
        # split the training target into smaller parts
        eatoutputs = self.eatcalc(
            inputs, neuronoutputs
        )  # eat 0, eat amount 1, move 2, turn left 3, turn right 4
        contactoutputs = self.contactcalc(
            inputs, neuronoutputs)  # kill 5, flee 6, sex 7
        # print (inputs, (eatoutputs + contactoutputs))
        return eatoutputs + contactoutputs
            final_dataset = [temp_dataset]

        else:
            final = np.concatenate((final, [temp]), axis=0)
            final_fft = np.concatenate((final_fft, [temp_fft]), axis=0)
            final_mfcc = np.concatenate((final_mfcc, [temp_mfcc]), axis=0)
            final_delta_mfcc = np.concatenate(
                (final_delta_mfcc, [temp_delta_mfcc]), axis=0)
            final_delta_delta_mfcc = np.concatenate(
                (final_delta_delta_mfcc, [temp_delta_delta_mfcc]), axis=0)
            final_dataset = np.concatenate((final_dataset, [temp_dataset]),
                                           axis=0)

    dataset_op = 0
    dataset_ip = 0
    ds.clear()
    parameters = np.load('parameters.npy')
    LSTMre._setParameters(parameters)

    output = np.empty((100, 5))
    #print(LSTMre.params)
    print('____________>output')
    for i in range(100):
        output[i] = LSTMre.activate(final_dataset[i])
    final_op = np.mean(output, axis=0)
    print('YOU SAID--------------->')
    state = generate_target.final_output(final_op)

    #is_it_correct = input('Is it correct?')
    '''if is_it_correct == 'n'
        is_correct = False
class PyImpNetwork():

    def __init__(self):
        
        #flags for program learning states
        self.learning = 0
        self.compute = 0
        self.recurrent_flag = False  # default is a non-recurrent feed-forward network

        #number of mapper inputs and outputs
        self.num_inputs = 0
        self.num_outputs = 0
        self.num_hidden = 0

        #For the Mapper Signals
        self.l_inputs = {}
        self.l_outputs = {}

        #For the Artificial Neural Network
        self.data_input = {}
        self.data_output = {}

        self.learnMapperDevice = mapper.device("Implicit_LearnMapper",9002)

    # mapper signal handler (updates self.data_input[sig_indx]=new_float_value)
    def h(self,sig, f):
        try:
            #print sig.name
            if '/in' in sig.name:
                s_indx = str.split(sig.name,"/in")
                self.data_input[int(s_indx[1])]=float(f)

            elif '/out' in sig.name:
                if (self.learning == 1):
                    print "FOUND /out and in learn mode", f
                    s_indx = str.split(sig.name,"/out")
                    self.data_output[int(s_indx[1])]=float(f)
                    print self.data_output[int(s_indx[1])]
        except:
            print "Exception, Handler not working"

    def hout(self,sig,f):
        try:
            if '/out' in sig.name:
                if (self.learning == 1):
                    print "FOUND /out and in learn mode", f
                    s_indx = str.split(sig.name,"/out")
                    self.data_output[int(s_indx[1])] = float(f)
                    print "Value saved to data_output", self.data_output[int(s_indx[1])]
        except:
            print "Exception, Handler not working"


    def createANN(self,n_inputs,n_hidden,n_outputs):
        #create ANN
        self.net = buildNetwork(n_inputs,n_hidden,n_outputs,bias=True, hiddenclass=SigmoidLayer, outclass=SigmoidLayer, recurrent=self.recurrent_flag)
        
        #create ANN Dataset
        self.ds = SupervisedDataSet(n_inputs,n_outputs)

    def createMapperInputs(self,n_inputs):
        #create mapper signals (inputs)
        for l_num in range(n_inputs):
            self.l_inputs[l_num] = self.learnMapperDevice.add_input("/in%d"%l_num, 1, 'f',None,0,1.0, self.h)
            print ("creating input", "/in"+str(l_num))

        # Set initial Data Input values for Network to 0
        for s_index in range(n_inputs):
            self.data_input[s_index] = 0.0

    def createMapperOutputs(self,n_outputs):
        #create mapper signals (n_outputs)
        for l_num in range(n_outputs):
            self.l_outputs[l_num] = self.learnMapperDevice.add_output("/out%d"%l_num, 1, 'f',None,0.0,1.0)
            self.l_outputs[l_num].set_query_callback(self.hout)
            print ("creating output","/out"+str(l_num))
        
        # Set initial Data Output values for Network to 0
        for s_index in range (n_outputs):
            self.data_output[s_index] = 0.0

    def setNumInputs(self,n_inputs):
        self.num_inputs = n_inputs

    def setNumeOutputs(self,n_outputs):
        self.num_outputs = n_outputs

    def setNumHiddenNodes(self,n_hidden):
        self.num_hidden = n_hidden

    def setReccurentFlag(self,flag):
        if (flag == "R"):
            self.recurrent_flag=True
        elif (flag == "F"):
            self.recurrent_flag=False
  
    def load_dataset(self,open_filename):
        self.ds = SupervisedDataSet.loadFromFile(open_filename)
        #print self.ds

    def save_dataset(self, filename):
        # Only write when a filename was actually chosen
        if str(filename[0]) != '':
            csv_file = open(filename[0] + ".csv", "w")
            csv_file.write("[inputs][outputs]\r\n")
            for inpt, tgt in self.ds:
                new_str = "{" + repr(inpt) + "," + repr(tgt) + "}"
                new_str = new_str.strip('\n').strip('\r') + "\r"
                csv_file.write(new_str)
            csv_file.close()

    def save_net(self, save_filename):
        networkwriter.NetworkWriter.writeToFile(self.net, save_filename)

    def load_net(self,open_filename):
        from pybrain.tools.customxml import networkreader
        self.net = networkreader.NetworkReader.readFrom(open_filename)

    def clear_dataset(self):
        if self.ds != 0:
            self.ds.clear()

    def clear_network(self):
        #resets the module buffers but doesn't reinitialise the connection weights
        #TODO: reinitialise network here or make a new option for it.
        self.net.reset()

    def learn_callback(self):

        if self.learning == 0:
            print ("learning is", self.learning)
            self.learning = 1

        elif self.learning == 1:
            print ("learning is", self.learning)
            self.learning = 0

    def compute_callback(self):

        if self.compute==1:
            self.compute =0
            print ("Compute network output is now OFF!")
        elif self.compute ==0:
            self.compute =1
            print ("Compute network output is now ON!")

    def train_callback(self):
        self.trainer = BackpropTrainer(self.net, learningrate=0.01, lrdecay=1, momentum=0.0, verbose=True)
        
        print 'MSE before', self.trainer.testOnData(self.ds, verbose=True)
        epoch_count = 0
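        # up to 100 rounds of at most 10 epochs each; the net is autosaved after every round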
        while epoch_count < 1000:
            epoch_count += 10
            self.trainer.trainUntilConvergence(dataset=self.ds, maxEpochs=10)
            networkwriter.NetworkWriter.writeToFile(self.net,'autosave.network')
        
        print 'MSE after', self.trainer.testOnData(self.ds, verbose=True)
        print ("\n")
        print 'Total epochs:', self.trainer.totalepochs

    def main_loop(self):
        self.learnMapperDevice.poll(1)
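        # Learning mode records (input -> output) samples queried from the GUI;
        # compute mode runs the live inputs through the trained net and pushes the
        # activations back out over the mapper outputs.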

        if ((self.learning == 1) and (self.compute == 0)):
            
            # Query output values upon change in GUI
            for index in range(self.num_outputs):
                self.data_output[index] = self.l_outputs[index].query_remote()
                print self.data_output[index]

            print ("Inputs: ")
            print (tuple(self.data_input.values()))
            print ("Outputs: ")
            print (tuple(self.data_output.values()))

            self.ds.addSample(tuple(self.data_input.values()),tuple(self.data_output.values()))
        
        if ((self.compute == 1) and (self.learning == 0)):
            activated_out = self.net.activate(tuple(self.data_input.values()))

            for out_index in range(self.num_outputs):
                self.data_output[out_index] = activated_out[out_index]
                self.l_outputs[out_index].update(self.data_output[out_index])
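# --- Hedged usage sketch (not part of the original file): one way the class above
# might be driven.  The 4-in/2-out sizes, the hidden-node count and the endless
# polling loop are assumptions.
if __name__ == '__main__':
    imp = PyImpNetwork()
    imp.setNumInputs(4)
    imp.setNumeOutputs(2)        # method name as defined above
    imp.setNumHiddenNodes(6)
    imp.createMapperInputs(4)
    imp.createMapperOutputs(2)
    imp.createANN(4, 6, 2)

    imp.learn_callback()         # toggle learning on: main_loop() now records samples
    while True:
        imp.main_loop()
        # later: imp.learn_callback(); imp.train_callback(); imp.compute_callback()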